{
  "best_metric": 0.5381352187361909,
  "best_model_checkpoint": "/mimer/NOBACKUP/groups/naiss2023-6-290/stefano/models//PROTAC-Splitter-EncoderDecoder-lr_reduce-opt25-rand-smiles/trial-number=17-learning_rate=2.8e-05-warmup_ratio=0.060-min_lr=0.000-factor=0.680/checkpoint-10000",
  "epoch": 0.9863878477017163,
  "eval_steps": 2500,
  "global_step": 10000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.049319392385085814,
      "grad_norm": 0.829456627368927,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 1.1848,
      "step": 500
    },
    {
      "epoch": 0.09863878477017163,
      "grad_norm": 0.6500025391578674,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.2962,
      "step": 1000
    },
    {
      "epoch": 0.14795817715525744,
      "grad_norm": 0.47073793411254883,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.1468,
      "step": 1500
    },
    {
      "epoch": 0.19727756954034326,
      "grad_norm": 0.329457551240921,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0878,
      "step": 2000
    },
    {
      "epoch": 0.24659696192542907,
      "grad_norm": 0.3172358572483063,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0581,
      "step": 2500
    },
    {
      "epoch": 0.2959163543105149,
      "grad_norm": 0.2527906894683838,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0424,
      "step": 3000
    },
    {
      "epoch": 0.3452357466956007,
      "grad_norm": 0.20880313217639923,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0323,
      "step": 3500
    },
    {
      "epoch": 0.3945551390806865,
      "grad_norm": 0.22946058213710785,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0257,
      "step": 4000
    },
    {
      "epoch": 0.4438745314657723,
      "grad_norm": 0.22088561952114105,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0207,
      "step": 4500
    },
    {
      "epoch": 0.49319392385085814,
      "grad_norm": 0.22565944492816925,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0175,
      "step": 5000
    },
    {
      "epoch": 0.49319392385085814,
      "eval_all_ligands_equal": 0.4470172337604949,
      "eval_e3_equal": 0.7797613787008396,
      "eval_e3_graph_edit_distance": 9.999999999999999e+63,
      "eval_e3_graph_edit_distance_norm": 1.0,
      "eval_e3_has_attachment_point(s)": 0.9953159522757402,
      "eval_e3_heavy_atoms_difference": 0.2501988510826337,
      "eval_e3_heavy_atoms_difference_norm": 0.0008378467337176671,
      "eval_e3_tanimoto_similarity": 0.0,
      "eval_e3_valid": 0.9953159522757402,
      "eval_has_all_attachment_points": 0.9885992045956694,
      "eval_has_three_substructures": 0.9992929739284137,
      "eval_heavy_atoms_difference": 5.620680512593902,
      "eval_heavy_atoms_difference_norm": 0.07341529464537826,
      "eval_linker_equal": 0.6448961555457358,
      "eval_linker_graph_edit_distance": 9.999999999999999e+63,
      "eval_linker_graph_edit_distance_norm": 1.0,
      "eval_linker_has_attachment_point(s)": 0.9975254087494476,
      "eval_linker_heavy_atoms_difference": 0.8009721608484313,
      "eval_linker_heavy_atoms_difference_norm": 0.02116394604640135,
      "eval_linker_tanimoto_similarity": 0.0,
      "eval_linker_valid": 0.9975254087494476,
      "eval_loss": 0.2864011228084564,
      "eval_num_fragments": 3.0,
      "eval_poi_equal": 0.7278833406981883,
      "eval_poi_graph_edit_distance": 9.999999999999999e+63,
      "eval_poi_graph_edit_distance_norm": 1.0,
      "eval_poi_has_attachment_point(s)": 0.9450287229341582,
      "eval_poi_heavy_atoms_difference": 1.7335395492708794,
      "eval_poi_heavy_atoms_difference_norm": 0.051064183804263204,
      "eval_poi_tanimoto_similarity": 0.0,
      "eval_poi_valid": 0.9450287229341582,
      "eval_reassembly": 0.45417587273530713,
      "eval_reassembly_nostereo": 0.4824569155987627,
      "eval_runtime": 1504.245,
      "eval_samples_per_second": 7.522,
      "eval_steps_per_second": 0.118,
      "eval_tanimoto_similarity": 0.0,
      "eval_valid": 0.9395492708793637,
      "step": 5000
    },
    {
      "epoch": 0.542513316235944,
      "grad_norm": 0.21528206765651703,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0151,
      "step": 5500
    },
    {
      "epoch": 0.5918327086210298,
      "grad_norm": 0.14188343286514282,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0133,
      "step": 6000
    },
    {
      "epoch": 0.6411521010061156,
      "grad_norm": 0.13938453793525696,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0116,
      "step": 6500
    },
    {
      "epoch": 0.6904714933912014,
      "grad_norm": 0.14794708788394928,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0102,
      "step": 7000
    },
    {
      "epoch": 0.7397908857762873,
      "grad_norm": 0.17807906866073608,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0092,
      "step": 7500
    },
    {
      "epoch": 0.7397908857762873,
      "eval_all_ligands_equal": 0.5155987627043748,
      "eval_e3_equal": 0.7997348652231551,
      "eval_e3_graph_edit_distance": 9.999999999999999e+63,
      "eval_e3_graph_edit_distance_norm": 1.0,
      "eval_e3_has_attachment_point(s)": 0.9950508174988952,
      "eval_e3_heavy_atoms_difference": 0.2891736632788334,
      "eval_e3_heavy_atoms_difference_norm": 0.0028955297689230454,
      "eval_e3_tanimoto_similarity": 0.0,
      "eval_e3_valid": 0.9950508174988952,
      "eval_has_all_attachment_points": 0.9878038002651348,
      "eval_has_three_substructures": 0.9990278391515687,
      "eval_heavy_atoms_difference": 5.2259832081308,
      "eval_heavy_atoms_difference_norm": 0.06936488082967972,
      "eval_linker_equal": 0.7362792752982766,
      "eval_linker_graph_edit_distance": 9.999999999999999e+63,
      "eval_linker_graph_edit_distance_norm": 1.0,
      "eval_linker_has_attachment_point(s)": 0.9962881131241714,
      "eval_linker_heavy_atoms_difference": 0.46539991162174105,
      "eval_linker_heavy_atoms_difference_norm": 0.007389768929469861,
      "eval_linker_tanimoto_similarity": 0.0,
      "eval_linker_valid": 0.9962881131241714,
      "eval_loss": 0.3027403950691223,
      "eval_num_fragments": 3.0004418912947415,
      "eval_poi_equal": 0.7507733097657976,
      "eval_poi_graph_edit_distance": 9.999999999999999e+63,
      "eval_poi_graph_edit_distance_norm": 1.0,
      "eval_poi_has_attachment_point(s)": 0.947326557666814,
      "eval_poi_heavy_atoms_difference": 1.571011931064958,
      "eval_poi_heavy_atoms_difference_norm": 0.04874062832136298,
      "eval_poi_tanimoto_similarity": 0.0,
      "eval_poi_valid": 0.947326557666814,
      "eval_reassembly": 0.5235528060097216,
      "eval_reassembly_nostereo": 0.5519222271321255,
      "eval_runtime": 1514.2596,
      "eval_samples_per_second": 7.472,
      "eval_steps_per_second": 0.117,
      "eval_tanimoto_similarity": 0.0,
      "eval_valid": 0.9410517012814847,
      "step": 7500
    },
    {
      "epoch": 0.789110278161373,
      "grad_norm": 0.13021309673786163,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0083,
      "step": 8000
    },
    {
      "epoch": 0.8384296705464589,
      "grad_norm": 0.15480028092861176,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0078,
      "step": 8500
    },
    {
      "epoch": 0.8877490629315447,
      "grad_norm": 0.13444596529006958,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0069,
      "step": 9000
    },
    {
      "epoch": 0.9370684553166305,
      "grad_norm": 0.1448230892419815,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0065,
      "step": 9500
    },
    {
      "epoch": 0.9863878477017163,
      "grad_norm": 0.09779265522956848,
      "learning_rate": 2.8331451115475792e-05,
      "loss": 0.0061,
      "step": 10000
    },
    {
      "epoch": 0.9863878477017163,
      "eval_all_ligands_equal": 0.5381352187361909,
      "eval_e3_equal": 0.8015908086610694,
      "eval_e3_graph_edit_distance": 9.999999999999999e+63,
      "eval_e3_graph_edit_distance_norm": 1.0,
      "eval_e3_has_attachment_point(s)": 0.9930181175430844,
      "eval_e3_heavy_atoms_difference": 0.3586389748121962,
      "eval_e3_heavy_atoms_difference_norm": 0.004046618972503898,
      "eval_e3_tanimoto_similarity": 0.0,
      "eval_e3_valid": 0.9930181175430844,
      "eval_has_all_attachment_points": 0.9855943437914273,
      "eval_has_three_substructures": 0.9994697304463102,
      "eval_heavy_atoms_difference": 4.982677861246134,
      "eval_heavy_atoms_difference_norm": 0.06595580402806239,
      "eval_linker_equal": 0.7677419354838709,
      "eval_linker_graph_edit_distance": 9.999999999999999e+63,
      "eval_linker_graph_edit_distance_norm": 1.0,
      "eval_linker_has_attachment_point(s)": 0.9951391957578436,
      "eval_linker_heavy_atoms_difference": 0.29235528060097216,
      "eval_linker_heavy_atoms_difference_norm": -0.001514220633297117,
      "eval_linker_tanimoto_similarity": 0.0,
      "eval_linker_valid": 0.9951391957578436,
      "eval_loss": 0.3127844035625458,
      "eval_num_fragments": 2.9998232434821035,
      "eval_poi_equal": 0.7611135660627486,
      "eval_poi_graph_edit_distance": 9.999999999999999e+63,
      "eval_poi_graph_edit_distance_norm": 1.0,
      "eval_poi_has_attachment_point(s)": 0.9520106053910738,
      "eval_poi_heavy_atoms_difference": 1.4593901900132566,
      "eval_poi_heavy_atoms_difference_norm": 0.04606472460308478,
      "eval_poi_tanimoto_similarity": 0.0,
      "eval_poi_valid": 0.9520106053910738,
      "eval_reassembly": 0.5465311533362793,
      "eval_reassembly_nostereo": 0.5773751657092355,
      "eval_runtime": 1527.8773,
      "eval_samples_per_second": 7.406,
      "eval_steps_per_second": 0.116,
      "eval_tanimoto_similarity": 0.0,
      "eval_valid": 0.9422889969067609,
      "step": 10000
    }
  ],
  "logging_steps": 500,
  "max_steps": 10000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.729673191023411e+16,
  "train_batch_size": 128,
  "trial_name": "trial-number=17-learning_rate=2.8e-05-warmup_ratio=0.060-min_lr=0.000-factor=0.680",
  "trial_params": {
    "factor": 0.6799999999999999,
    "learning_rate": 2.8331451115475792e-05,
    "min_lr": 7.147010870778695e-11,
    "warmup_ratio": 0.060000000000000005
  }
}