/// <summary>
/// Gaussian error linear unit (GELU) computes x * P(X <= x), where X ~ N(0, 1). The GELU nonlinearity weights inputs by their value, rather than gating inputs by their sign as in ReLU.
/// </summary>
/// <param name="x">Input tensor.</param>
/// <param name="approximate">A bool, whether to enable approximation.</param>
@@ -183,18 +185,18 @@ public class ModelCheckpoint : Callback
/// <param name="save_best_only">if save_best_only=True, the model is only saved when it is considered the best so far, and the latest best model according to the quantity monitored will not be overwritten.</param>
/// <param name="save_weights_only">if True, then only the model's weights will be saved (model.save_weights(filepath)); else the full model is saved (model.save(filepath)).</param>
/// <param name="mode">one of {auto, min, max}. If save_best_only=True, the decision to overwrite the current save file is made based on either the maximization or the minimization of the monitored quantity. For val_acc this should be max, for val_loss this should be min, etc. In auto mode, the direction is automatically inferred from the name of the monitored quantity.</param>
/// <param name="period">Interval (number of epochs) between checkpoints.</param>
/// <param name="save_freq">'epoch' or integer. When using 'epoch', the callback saves the model after each epoch. When using an integer, the callback saves the model at the end of this many batches.</param>
@@ -216,7 +218,9 @@ public class EarlyStopping : Callback
/// <param name="mode">one of {auto, min, max}. In min mode, training will stop when the quantity monitored has stopped decreasing; in max mode it will stop when the quantity monitored has stopped increasing; in auto mode, the direction is automatically inferred from the name of the monitored quantity.</param>
/// <param name="baseline">Baseline value for the monitored quantity to reach. Training will stop if the model doesn't show improvement over the baseline.</param>
/// <param name="restore_best_weights">whether to restore model weights from the epoch with the best value of the monitored quantity. If False, the model weights obtained at the last step of training are used.</param>
/// <param name="start_from_epoch">Number of epochs to wait before starting to monitor improvement. This allows for a warm-up period in which no improvement is expected and thus training will not be stopped.</param>
@@ -294,28 +299,24 @@ public class TensorBoard : Callback
/// </summary>
/// <param name="log_dir">the path of the directory where the log files to be parsed by TensorBoard are saved.</param>
/// <param name="histogram_freq">frequency (in epochs) at which to compute activation and weight histograms for the layers of the model. If set to 0, histograms won't be computed. Validation data (or split) must be specified for histogram visualizations.</param>
/// <param name="batch_size">size of the batch of inputs to feed to the network for histogram computation.</param>
/// <param name="write_graph">whether to visualize the graph in TensorBoard. The log file can become quite large when write_graph is set to True.</param>
/// <param name="write_grads">whether to visualize gradient histograms in TensorBoard. histogram_freq must be greater than 0.</param>
/// <param name="write_images">whether to write model weights to visualize as images in TensorBoard.</param>
/// <param name="write_steps_per_second">whether to log the training steps per second into TensorBoard. This supports both epoch and batch frequency logging.</param>
/// <param name="update_freq">'batch', 'epoch', or integer. When using 'epoch', writes the losses and metrics to TensorBoard after every epoch; when using an integer, writes them after that many batches.</param>
/// <param name="embeddings_freq">frequency (in epochs) at which selected embedding layers will be saved. If set to 0, embeddings won't be computed. Data to be visualized in TensorBoard's Embedding tab must be passed as embeddings_data.</param>
/// <param name="embeddings_layer_names">a list of names of layers to watch. If None or an empty list, all embedding layers will be watched.</param>
/// <param name="embeddings_metadata">a dictionary which maps a layer name to a file name in which metadata for this embedding layer is saved. See the details about the metadata file format. If the same metadata file is used for all embedding layers, a single string can be passed.</param>
/// <param name="embeddings_data">data to be embedded at the layers specified in embeddings_layer_names. Numpy array (if the model has a single input) or list of Numpy arrays (if the model has multiple inputs).</param>
/// <summary>
/// BackupAndRestore callback is intended to recover training from an interruption that has happened in the middle of a Model.fit execution,
/// by backing up the training states in a temporary checkpoint file (with the help of a tf.train.CheckpointManager) at the end of each epoch.
/// </summary>
public class BackupAndRestore : Callback
{
    /// <summary>
    /// Initializes a new instance of the <see cref="BackupAndRestore" /> class.
    /// </summary>
    /// <param name="backup_dir">String, path to store the checkpoint, e.g. backup_dir = os.path.join(working_dir, 'backup').</param>
    /// <param name="save_freq">'epoch', integer, or False. When set to 'epoch', the callback saves the checkpoint at the end of each epoch; when set to an integer, it saves the checkpoint every save_freq batches.</param>
    /// <param name="delete_checkpoint">Boolean, defaults to True. This BackupAndRestore callback works by saving a checkpoint to back up the training state; if True, the checkpoint is deleted after training finishes.</param>
    /// <param name="save_before_preemption">A boolean value instructing whether to turn on automatic checkpoint saving for preemption/maintenance events.</param>