add examples for minimal Finetuna example
alchem0x2A committed Feb 22, 2024
1 parent d8001fa commit 90e6b1e
Showing 3 changed files with 128 additions and 0 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -782,3 +782,4 @@ examples/ex1-ase/
/*.psp8
/test-1/
/test-2/
*.pt
54 changes: 54 additions & 0 deletions examples/active_learning/example_finetuna_minimal.py
@@ -0,0 +1,54 @@
"""A minimal example combining active learning library like Finetuna with SPARC
usage
First download the checkpoint from the url https://dl.fbaipublicfiles.com/opencatalystproject/models/2021_08/s2ef/gemnet_t_direct_h512_all.pt
python example_finetuna_minimal.py
"""
import os
from pathlib import Path

import torch
import yaml
from ase.build import molecule
from ase.optimize import BFGS
from finetuna.ml_potentials.finetuner_ensemble_calc import FinetunerEnsembleCalc
from finetuna.online_learner.online_learner import OnlineLearner

from sparc.calculator import SPARC

cpu = not torch.cuda.is_available()
curdir = Path(__file__).parent
config_file = curdir / "ft_config_gemnet_gpu.yml"
with open(config_file, "r") as fd:
    configs = yaml.load(fd, Loader=yaml.FullLoader)

checkpoint = os.environ.get("CHECKPOINT_PATH", None)
if checkpoint is None:
    # Use the default (relative) checkpoint path from the config
    checkpoint = curdir / configs["ocp"]["checkpoint_path_list"][0]
checkpoint = Path(checkpoint)

if not checkpoint.is_file():
    raise FileNotFoundError("Cannot find the model checkpoint file!")

finetuner = configs["finetuner"]
finetuner[0].update(cpu=cpu)
learner = configs["learner"]

ml_potential = FinetunerEnsembleCalc(
    checkpoint_paths=[checkpoint],
    mlp_params=finetuner,
)


init_molecule = molecule("H2O", pbc=False, cell=[8, 8, 8])

sparc_params = {"xc": "pbe", "h": 0.22}
with SPARC(**sparc_params) as parent_calc:
    # OnlineLearner wraps the ML potential and falls back to the parent
    # SPARC calculator whenever the learner's criteria are met; the empty
    # list is the initial parent dataset.
    onlinecalc = OnlineLearner(learner, [], ml_potential, parent_calc)
    init_molecule.calc = onlinecalc
    dyn = BFGS(init_molecule, maxstep=0.2)
    dyn.run(fmax=0.03)
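
The run above keeps no on-disk record of the relaxation. A minimal sketch of how one could record it, assuming the script's default setup; the filename online_relaxation.traj and the placement are illustrative, not part of the committed example:

# Hypothetical addition inside the `with SPARC(...)` block, just before
# dyn.run(): attach an ASE Trajectory writer so every optimizer step is
# saved and can be inspected later with ase.io.read.
from ase.io.trajectory import Trajectory

traj = Trajectory("online_relaxation.traj", "w", init_molecule)
dyn.attach(traj.write, interval=1)  # write one frame per BFGS step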
73 changes: 73 additions & 0 deletions examples/active_learning/ft_config_gemnet_gpu.yml
@@ -0,0 +1,73 @@
finetuner:
  - cpu: false
    optim:
      batch_size: 1
      break_below_lr: 1.0e-07
      checkpoint_every: 100000
      eps: 1.0e-08
      eval_every: 1
      factor: 0.9
      force_coefficient: 100
      lr_initial: 0.0003
      max_epochs: 400
      num_workers: 0
      optimizer_params:
        eps: 1.0e-08
        weight_decay: 0
      patience: 3
      # print_loss_and_lr: true
      scheduler_loss: train
      weight_decay: 0
    task:
      primary_metric: loss
    tuner:
      num_threads: 8
      unfreeze_blocks:
        - out_blocks.3.seq_forces
        - out_blocks.3.scale_rbf_F
        - out_blocks.3.dense_rbf_F
        - out_blocks.3.out_forces
        - out_blocks.2.seq_forces
        - out_blocks.2.scale_rbf_F
        - out_blocks.2.dense_rbf_F
        - out_blocks.2.out_forces
        - out_blocks.1.seq_forces
        - out_blocks.1.scale_rbf_F
        - out_blocks.1.dense_rbf_F
        - out_blocks.1.out_forces
learner:
  dyn_avg_steps: 15
  dyn_uncertain_tol: 1000000 # Dynamic uncertainty tolerance
  # fmax_verify_threshold: 0.03 # Fmax threshold for calling a parent DFT single-point calculation
  # initial_points_to_keep: []
  # logger:
  #   pca_quantify: true # Log PCA on wandb
  #   uncertainty_quantify: false
  num_initial_points: 0 # Number of parent DFT calls at the beginning of the relaxation
  partial_fit: true
  query_every_n_steps: 30 # K-steps querying strategy: query the parent every 30 steps
  stat_uncertain_tol: 1000000 # Static uncertainty tolerance
  tolerance_selection: min
  # valset_system_id: '1498818'
  # wandb_init:
  #   entity: ulissi-group
  #   project: project
  #   group: group
  #   name: name
  #   notes: notes
  # wandb_log: false # Wandb disabled
  # optional_config:
  #   links:
  #     traj: /path/to/vasp/reference/traj
ocp:
  model_class_list:
    - gemnet
  checkpoint_path_list:
    - "gemnet_t_direct_h512_all.pt"
# relaxation:
#   # fmax: 0.03
#   max_parent_calls: null
#   maxstep: 0.2
#   replay_method: parent_only
#   steps: null
#   trajname: oal_relaxation.traj
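
Both uncertainty tolerances above are set to 1000000, so parent SPARC calls in this example are driven purely by the query_every_n_steps schedule. A minimal sketch of switching to uncertainty-driven queries, reusing the config loading from the example script; the value 0.1 is an illustrative guess, not a tuned setting:

import yaml

with open("ft_config_gemnet_gpu.yml", "r") as fd:
    configs = yaml.load(fd, Loader=yaml.FullLoader)

# Lower the tolerances so the learner queries SPARC whenever the ensemble
# is uncertain, rather than only every 30 steps (0.1 is illustrative).
configs["learner"]["stat_uncertain_tol"] = 0.1
configs["learner"]["dyn_uncertain_tol"] = 0.1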
