Commit fb9628b: update readme
1 parent: f15ce10

3 files changed: 42 additions, 73 deletions

docs/source/conf.py (0 additions, 1 deletion)
@@ -23,7 +23,6 @@
 
 templates_path = ['_templates']
 exclude_patterns = []
-autodoc_typehints = "description"
 
 
 # -- Options for HTML output -------------------------------------------------
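The deleted setting, autodoc_typehints = "description", tells Sphinx autodoc to move signature type hints into the generated parameter descriptions. It becomes redundant once the docstrings (rewritten below) state their types explicitly in NumPy style. A minimal sketch of the resulting configuration, assuming sphinx.ext.napoleon is enabled to parse NumPy-style docstrings; the extension list is illustrative, not taken from this commit:

# docs/source/conf.py (sketch)
extensions = [
    "sphinx.ext.autodoc",   # pull docstrings from the source
    "sphinx.ext.napoleon",  # parse NumPy-style "Parameters" sections
]

templates_path = ['_templates']
exclude_patterns = []

# autodoc_typehints = "description" is no longer needed: parameter types
# are written out in the docstrings, so autodoc need not relocate
# signature hints into the descriptions.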

docs/source/index copy.rst (0 additions, 40 deletions)

This file was deleted.

torch_molecule/generator/graph_dit/modeling_graph_dit.py (42 additions, 32 deletions)
@@ -26,38 +26,48 @@ class GraphDITMolecularGenerator(BaseMolecularGenerator):
 
     Reference Code: https://github.com/liugangcode/Graph-DiT
 
-    :param num_layer: Number of transformer layers, defaults to 6
-    :type num_layer: int, optional
-    :param hidden_size: Dimension of hidden layers, defaults to 1152
-    :type hidden_size: int, optional
-    :param dropout: Dropout rate for transformer layers, defaults to 0.0
-    :type dropout: float, optional
-    :param drop_condition: Dropout rate for condition embedding, defaults to 0.0
-    :type drop_condition: float, optional
-    :param num_head: Number of attention heads in transformer, defaults to 16
-    :type num_head: int, optional
-    :param mlp_ratio: Ratio of MLP hidden dimension to transformer hidden dimension, defaults to 4
-    :type mlp_ratio: float, optional
-    :param task_type: List specifying type of each task ('regression' or 'classification'), defaults to []
-    :type task_type: List[str], optional
-    :param timesteps: Number of diffusion timesteps, defaults to 500
-    :type timesteps: int, optional
-    :param batch_size: Batch size for training, defaults to 128
-    :type batch_size: int, optional
-    :param epochs: Number of training epochs, defaults to 10000
-    :type epochs: int, optional
-    :param learning_rate: Learning rate for optimization, defaults to 0.0002
-    :type learning_rate: float, optional
-    :param grad_clip_value: Value for gradient clipping (None = no clipping), defaults to None
-    :type grad_clip_value: Optional[float], optional
-    :param weight_decay: Weight decay for optimization, defaults to 0.0
-    :type weight_decay: float, optional
-    :param lw_X: Loss weight for node reconstruction, defaults to 1
-    :type lw_X: float, optional
-    :param lw_E: Loss weight for edge reconstruction, defaults to 10
-    :type lw_E: float, optional
-    :param guide_scale: Scale factor for classifier-free guidance during sampling, defaults to 2.0
-    :type guide_scale: float, optional
+    Parameters
+    ----------
+    num_layer : int, default=6
+        Number of transformer layers
+    hidden_size : int, default=1152
+        Dimension of hidden layers
+    dropout : float, default=0.0
+        Dropout rate for transformer layers
+    drop_condition : float, default=0.0
+        Dropout rate for condition embedding
+    num_head : int, default=16
+        Number of attention heads in transformer
+    mlp_ratio : float, default=4
+        Ratio of MLP hidden dimension to transformer hidden dimension
+    task_type : List[str], default=[]
+        List specifying type of each task ('regression' or 'classification')
+    timesteps : int, default=500
+        Number of diffusion timesteps
+    batch_size : int, default=128
+        Batch size for training
+    epochs : int, default=10000
+        Number of training epochs
+    learning_rate : float, default=0.0002
+        Learning rate for optimization
+    grad_clip_value : Optional[float], default=None
+        Value for gradient clipping (None = no clipping)
+    weight_decay : float, default=0.0
+        Weight decay for optimization
+    lw_X : float, default=1
+        Loss weight for node reconstruction
+    lw_E : float, default=5
+        Loss weight for edge reconstruction
+    guide_scale : float, default=2.0
+        Scale factor for classifier-free guidance during sampling
+    use_lr_scheduler : bool, default=False
+        Whether to use learning rate scheduler
+    scheduler_factor : float, default=0.5
+        Factor by which to reduce learning rate on plateau
+    scheduler_patience : int, default=5
+        Number of epochs with no improvement after which learning rate will be reduced
+    verbose : bool, default=False
+        Whether to display progress bars and logs
     """
     # Model parameters
     num_layer: int = 6
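The rewritten docstring doubles as the constructor's parameter reference, since the fields below it (e.g. num_layer: int = 6) are declared dataclass-style. A minimal usage sketch, assuming the class accepts the documented names as keyword arguments and is importable from the module path above; neither is confirmed beyond this diff:

from torch_molecule.generator.graph_dit.modeling_graph_dit import (
    GraphDITMolecularGenerator,
)

# Instantiate with documented defaults, overriding a few for a single
# regression-conditioned generation task. All keyword names come from the
# new docstring; treating them as constructor arguments is an assumption.
model = GraphDITMolecularGenerator(
    task_type=["regression"],  # one entry per conditioning property
    timesteps=500,             # diffusion steps (documented default)
    learning_rate=2e-4,
    lw_X=1.0,                  # node-reconstruction loss weight
    lw_E=5.0,                  # edge-reconstruction loss weight (new default)
    guide_scale=2.0,           # classifier-free guidance scale at sampling
    verbose=True,              # progress bars and logs
)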
