Skip to content

Commit cf11ce9

Browse files
author
RAA\deepakb4437
committed
Updated Activations, new callbacks, Optimizers. WIP Layers
1 parent 887cebb commit cf11ce9

19 files changed

+493
-108
lines changed

Examples/BasicSamples/EarlyStopExample.cs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
using Keras.Layers;
44
using Keras.Models;
55
using Keras.Optimizers;
6+
using Keras.Optimizers.Legacy;
67
using Numpy;
78
using System;
89
using System.Collections.Generic;

Examples/BasicSamples/ImplementCallback.cs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
using Keras.Layers;
44
using Keras.Models;
55
using Keras.Optimizers;
6+
using Keras.Optimizers.Legacy;
67
using Numpy;
78
using System;
89
using System.Collections.Generic;

Examples/BasicSamples/MNIST_CNN.cs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
using Keras.Layers;
1010
using Keras.Utils;
1111
using Keras.Optimizers;
12+
using Keras.Optimizers.Legacy;
1213

1314
namespace BasicSamples
1415
{

Examples/ImageExamples/Cifar10_CNN.cs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
using Keras.Optimizers;
1212
using Keras.PreProcessing.Image;
1313
using System.IO;
14+
using Keras.Optimizers.Legacy;
1415

1516
namespace ImageExamples
1617
{

Examples/ImageExamples/MNIST_CNN.cs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
using Keras.Utils;
1111
using Keras.Optimizers;
1212
using System.IO;
13+
using Keras.Optimizers.Legacy;
1314

1415
namespace ImageExamples
1516
{

Keras.NET.sln

Lines changed: 1 addition & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -19,11 +19,9 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Keras", "Keras\Keras.csproj
1919
EndProject
2020
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ReleaseBot", "ReleaseBot\ReleaseBot.csproj", "{2BAEA60C-88A2-45DC-8044-2C9571E1B8CF}"
2121
EndProject
22-
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "KerasExampleWinApp", "Examples\KerasExampleWinApp\KerasExampleWinApp.csproj", "{0C0B0830-4871-4979-8675-93F980F5EBE2}"
23-
EndProject
2422
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "MusicGeneration", "Examples\MusicGeneration\MusicGeneration.csproj", "{108C3326-58D2-4C26-9D78-5F045D620A26}"
2523
EndProject
26-
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Tensorflow", "Tensorflow\Tensorflow.csproj", "{27230C96-FCB4-406C-8AAD-450020F9074D}"
24+
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow", "Tensorflow\Tensorflow.csproj", "{27230C96-FCB4-406C-8AAD-450020F9074D}"
2725
EndProject
2826
Global
2927
GlobalSection(SolutionConfigurationPlatforms) = preSolution
@@ -255,38 +253,6 @@ Global
255253
{2BAEA60C-88A2-45DC-8044-2C9571E1B8CF}.Release|Any CPU.Build.0 = Release|Any CPU
256254
{2BAEA60C-88A2-45DC-8044-2C9571E1B8CF}.Release|x64.ActiveCfg = Release|Any CPU
257255
{2BAEA60C-88A2-45DC-8044-2C9571E1B8CF}.Release|x64.Build.0 = Release|Any CPU
258-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
259-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Debug|Any CPU.Build.0 = Debug|Any CPU
260-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Debug|x64.ActiveCfg = Debug|Any CPU
261-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Debug|x64.Build.0 = Debug|Any CPU
262-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py2.7_Mono|Any CPU.ActiveCfg = Release|Any CPU
263-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py2.7_Mono|Any CPU.Build.0 = Release|Any CPU
264-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py2.7_Mono|x64.ActiveCfg = Release|Any CPU
265-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py2.7_Mono|x64.Build.0 = Release|Any CPU
266-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py2.7_OSX|Any CPU.ActiveCfg = Release|Any CPU
267-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py2.7_OSX|Any CPU.Build.0 = Release|Any CPU
268-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py2.7_OSX|x64.ActiveCfg = Release|Any CPU
269-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py2.7_OSX|x64.Build.0 = Release|Any CPU
270-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py2.7_WIN|Any CPU.ActiveCfg = Release|Any CPU
271-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py2.7_WIN|Any CPU.Build.0 = Release|Any CPU
272-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py2.7_WIN|x64.ActiveCfg = Release|Any CPU
273-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py2.7_WIN|x64.Build.0 = Release|Any CPU
274-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py3.6_Mono|Any CPU.ActiveCfg = Release|Any CPU
275-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py3.6_Mono|Any CPU.Build.0 = Release|Any CPU
276-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py3.6_Mono|x64.ActiveCfg = Release|Any CPU
277-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py3.6_Mono|x64.Build.0 = Release|Any CPU
278-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py3.6_OSX|Any CPU.ActiveCfg = Release|Any CPU
279-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py3.6_OSX|Any CPU.Build.0 = Release|Any CPU
280-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py3.6_OSX|x64.ActiveCfg = Release|Any CPU
281-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py3.6_OSX|x64.Build.0 = Release|Any CPU
282-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py3.6_WIN|Any CPU.ActiveCfg = Release|Any CPU
283-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py3.6_WIN|Any CPU.Build.0 = Release|Any CPU
284-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py3.6_WIN|x64.ActiveCfg = Release|Any CPU
285-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Py3.6_WIN|x64.Build.0 = Release|Any CPU
286-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Release|Any CPU.ActiveCfg = Release|Any CPU
287-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Release|Any CPU.Build.0 = Release|Any CPU
288-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Release|x64.ActiveCfg = Release|Any CPU
289-
{0C0B0830-4871-4979-8675-93F980F5EBE2}.Release|x64.Build.0 = Release|Any CPU
290256
{108C3326-58D2-4C26-9D78-5F045D620A26}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
291257
{108C3326-58D2-4C26-9D78-5F045D620A26}.Debug|Any CPU.Build.0 = Debug|Any CPU
292258
{108C3326-58D2-4C26-9D78-5F045D620A26}.Debug|x64.ActiveCfg = Debug|Any CPU
@@ -359,7 +325,6 @@ Global
359325
{A0786763-88EC-41DB-9E4F-6DDACA1A1162} = {96B07D94-46E0-4A1C-9484-E842B47FFE04}
360326
{EC18ED5C-A9EC-414F-948C-DD1BC052D312} = {96B07D94-46E0-4A1C-9484-E842B47FFE04}
361327
{7F906C3D-4C18-4185-8235-4908FC082398} = {96B07D94-46E0-4A1C-9484-E842B47FFE04}
362-
{0C0B0830-4871-4979-8675-93F980F5EBE2} = {96B07D94-46E0-4A1C-9484-E842B47FFE04}
363328
{108C3326-58D2-4C26-9D78-5F045D620A26} = {96B07D94-46E0-4A1C-9484-E842B47FFE04}
364329
EndGlobalSection
365330
GlobalSection(ExtensibilityGlobals) = postSolution

Keras.UnitTest/Keras.Layers.Core.cs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ public void Dense_CustomKRegularizerAndKInitParams()
4646
Assert.AreEqual(2000, modelAsJson.config.layers[i].config.kernel_regularizer.config.l2.Value);
4747

4848
// Compile and train
49-
model.Compile(optimizer: new Adam(lr: 0.001F), loss: "binary_crossentropy", metrics: new string[] { "accuracy" });
49+
model.Compile(optimizer: new Adam(learning_rate: 0.001F), loss: "binary_crossentropy", metrics: new string[] { "accuracy" });
5050
model.Fit(x, y, batch_size: x.shape[0], epochs: 100, verbose: 0);
5151
Assert.AreEqual(2, model.GetWeights().Count);
5252
}

Keras/Activations.cs

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -157,5 +157,43 @@ public static NDarray Linear(NDarray x)
157157
parameters["x"] = x;
158158
return new NDarray(InvokeStaticMethod(caller, "linear", parameters));
159159
}
160+
161+
/// <summary>
162+
/// Gaussian error linear unit (GELU) computes x * P(X <= x), where P(X) ~ N(0, 1). The (GELU) nonlinearity weights inputs by their value, rather than gates inputs by their sign as in ReLU.
163+
/// </summary>
164+
/// <param name="x">Input tensor.</param>
165+
/// <param name="approximate">A bool, whether to enable approximation.</param>
166+
/// <returns></returns>
167+
public static NDarray Gelu(NDarray x, bool approximate = false)
168+
{
169+
Dictionary<string, object> parameters = new Dictionary<string, object>();
170+
parameters["x"] = x;
171+
parameters["approximate"] = approximate;
172+
return new NDarray(InvokeStaticMethod(caller, "gelu", parameters));
173+
}
174+
175+
/// <summary>
176+
/// Mish activation function.
177+
/// </summary>
178+
/// <param name="x">Input tensor.</param>
179+
/// <returns>Output tensor</returns>
180+
public static NDarray Mish(NDarray x)
181+
{
182+
Dictionary<string, object> parameters = new Dictionary<string, object>();
183+
parameters["x"] = x;
184+
return new NDarray(InvokeStaticMethod(caller, "mish", parameters));
185+
}
186+
187+
/// <summary>
188+
/// Swish activation function, swish(x) = x * sigmoid(x).
189+
/// </summary>
190+
/// <param name="x">Input tensor.</param>
191+
/// <returns>Output tensor</returns>
192+
public static NDarray Swish(NDarray x)
193+
{
194+
Dictionary<string, object> parameters = new Dictionary<string, object>();
195+
parameters["x"] = x;
196+
return new NDarray(InvokeStaticMethod(caller, "swish", parameters));
197+
}
160198
}
161199
}

Keras/Callbacks.cs

Lines changed: 69 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,8 @@
66
using Python.Runtime;
77
using Numpy;
88
using System.IO;
9+
using static System.Net.WebRequestMethods;
10+
using Keras.Models;
911

1012
namespace Keras.Callbacks
1113
{
@@ -36,7 +38,7 @@ public static Callback Custom(string name, string fileOrcode, bool isFile = true
3638
string code = "";
3739
if(isFile)
3840
{
39-
code = File.ReadAllText(fileOrcode);
41+
code = System.IO.File.ReadAllText(fileOrcode);
4042
}
4143
else
4244
{
@@ -183,18 +185,18 @@ public class ModelCheckpoint : Callback
183185
/// <param name="save_best_only">if save_best_only=True, the latest best model according to the quantity monitored will not be overwritten.</param>
184186
/// <param name="save_weights_only"> if True, then only the model's weights will be saved (model.save_weights(filepath)), else the full model is saved (model.save(filepath)).</param>
185187
/// <param name="mode">one of {auto, min, max}. If save_best_only=True, the decision to overwrite the current save file is made based on either the maximization or the minimization of the monitored quantity. For val_acc, this should be max, for val_loss this should be min, etc. In auto mode, the direction is automatically inferred from the name of the monitored quantity.</param>
186-
/// <param name="period">Interval (number of epochs) between checkpoints.</param>
187-
public ModelCheckpoint(string filepath, string monitor = "val_loss", int verbose = 0, bool save_best_only = true
188-
, bool save_weights_only = false, string mode = "auto", int period = 1)
188+
/// <param name="save_freq">'epoch' or integer. When using 'epoch', the callback saves the model after each epoch. When using integer, the callback saves the model at end of this many batches.</param>
189+
public ModelCheckpoint(string filepath, string monitor = "val_loss", int verbose = 0, bool save_best_only = false
190+
, bool save_weights_only = false, string mode = "auto", string save_freq= "epoch")
189191
{
190192
Parameters["filepath"] = filepath;
191193
Parameters["monitor"] = monitor;
192194
Parameters["verbose"] = verbose;
193195
Parameters["save_best_only"] = save_best_only;
194196
Parameters["save_weights_only"] = save_weights_only;
195197
Parameters["mode"] = mode;
196-
Parameters["period"] = period;
197-
198+
Parameters["save_freq"] = save_freq;
199+
//ToDo: extend options parameter
198200
PyInstance = Instance.keras.callbacks.ModelCheckpoint;
199201
Init();
200202
}
@@ -216,7 +218,9 @@ public class EarlyStopping : Callback
216218
/// <param name="mode">one of {auto, min, max}. In min mode, training will stop when the quantity monitored has stopped decreasing; in max mode it will stop when the quantity monitored has stopped increasing; in auto mode, the direction is automatically inferred from the name of the monitored quantity.</param>
217219
/// <param name="baseline"> Baseline value for the monitored quantity to reach. Training will stop if the model doesn't show improvement over the baseline.</param>
218220
/// <param name="restore_best_weights"> whether to restore model weights from the epoch with the best value of the monitored quantity. If False, the model weights obtained at the last step of training are used.</param>
219-
public EarlyStopping(string monitor = "val_loss", float min_delta = 0, int patience = 0, int verbose = 0, string mode = "auto", float? baseline = null, bool restore_best_weights = false)
221+
/// <param name="start_from_epoch"> Number of epochs to wait before starting to monitor improvement. This allows for a warm-up period in which no improvement is expected and thus training will not be stopped.</param>
222+
public EarlyStopping(string monitor = "val_loss", float min_delta = 0, int patience = 0, int verbose = 0, string mode = "auto",
223+
float? baseline = null, bool restore_best_weights = false, int start_from_epoch = 0)
220224
{
221225
Parameters["monitor"] = monitor;
222226
Parameters["min_delta"] = min_delta;
@@ -225,6 +229,7 @@ public EarlyStopping(string monitor = "val_loss", float min_delta = 0, int patie
225229
Parameters["mode"] = mode;
226230
Parameters["baseline"] = baseline;
227231
Parameters["restore_best_weights"] = restore_best_weights;
232+
Parameters["start_from_epoch"] = start_from_epoch;
228233

229234
PyInstance = Instance.keras.callbacks.EarlyStopping;
230235
Init();
@@ -294,28 +299,24 @@ public class TensorBoard : Callback
294299
/// </summary>
295300
/// <param name="log_dir"> the path of the directory where to save the log files to be parsed by TensorBoard.</param>
296301
/// <param name="histogram_freq"> frequency (in epochs) at which to compute activation and weight histograms for the layers of the model. If set to 0, histograms won't be computed. Validation data (or split) must be specified for histogram visualizations.</param>
297-
/// <param name="batch_size"> size of batch of inputs to feed to the network for histograms computation.</param>
298302
/// <param name="write_graph"> whether to visualize the graph in TensorBoard. The log file can become quite large when write_graph is set to True.</param>
299-
/// <param name="write_grads"> whether to visualize gradient histograms in TensorBoard. histogram_freq must be greater than 0.</param>
300303
/// <param name="write_images"> whether to write model weights to visualize as image in TensorBoard.</param>
304+
/// <param name="write_steps_per_second"> whether to log the training steps per second into TensorBoard. This supports both epoch and batch frequency logging.</param>
305+
/// <param name="update_freq"> 'batch' or 'epoch' or integer. When using 'epoch', writes the losses and metrics to TensorBoard after every epoch. </param>
301306
/// <param name="embeddings_freq"> frequency (in epochs) at which selected embedding layers will be saved. If set to 0, embeddings won't be computed. Data to be visualized in TensorBoard's Embedding tab must be passed as embeddings_data.</param>
302-
/// <param name="embeddings_layer_names"> a list of names of layers to keep eye on. If None or empty list all the embedding layer will be watched.</param>
303307
/// <param name="embeddings_metadata"> a dictionary which maps layer name to a file name in which metadata for this embedding layer is saved. See the details about metadata files format. In case if the same metadata file is used for all embedding layers, string can be passed.</param>
304-
/// <param name="embeddings_data"> data to be embedded at layers specified in embeddings_layer_names. Numpy array (if the model has a single input) or list of Numpy arrays (if the model has multiple inputs). Learn more about embeddings.</param>
305-
public TensorBoard(string log_dir= "./logs", int histogram_freq= 0, int batch_size= 32, bool write_graph= true, bool write_grads= false,
306-
bool write_images= false, int embeddings_freq= 0, string[] embeddings_layer_names= null, Dictionary<string, string> embeddings_metadata= null,
307-
NDarray embeddings_data= null, string update_freq= "epoch")
308+
public TensorBoard(string log_dir= "./logs", int histogram_freq= 0, bool write_graph= true, bool write_images= false, int? write_steps_per_second = null,
309+
string update_freq = "epoch", int embeddings_freq= 0, Dictionary<string, string> embeddings_metadata= null)
308310
{
309311
Parameters["log_dir"] = log_dir;
310312
Parameters["histogram_freq"] = histogram_freq;
311-
Parameters["batch_size"] = batch_size;
312313
Parameters["write_graph"] = write_graph;
314+
Parameters["write_images"] = write_images;
315+
Parameters["write_steps_per_second"] = write_steps_per_second;
316+
Parameters["update_freq"] = update_freq;
313317
Parameters["embeddings_freq"] = embeddings_freq;
314-
Parameters["embeddings_layer_names"] = embeddings_layer_names;
315318
Parameters["embeddings_metadata"] = embeddings_metadata;
316-
Parameters["embeddings_data"] = embeddings_data?.PyObject;
317-
Parameters["update_freq"] = update_freq;
318-
319+
319320
PyInstance = Instance.keras.callbacks.TensorBoard;
320321
Init();
321322
}
@@ -380,4 +381,53 @@ public CSVLogger(string filename, string separator = ",", bool append = false)
380381
Init();
381382
}
382383
}
384+
385+
/// <summary>
386+
/// BackupAndRestore callback is intended to recover training from an interruption that has happened in the middle of a Model.fit execution,
387+
/// by backing up the training states in a temporary checkpoint file (with the help of a tf.train.CheckpointManager), at the end of each epoch.
388+
/// </summary>
389+
public class BackupAndRestore : Callback
390+
{
391+
/// <summary>
392+
/// Initializes a new instance of the <see cref="BackupAndRestore" /> class.
393+
/// </summary>
394+
/// <param name="backup_dir">String, path to store the checkpoint. e.g. backup_dir = os.path.join(working_dir, 'backup')</param>
395+
/// <param name="save_freq">'epoch', integer, or False. When set to 'epoch' the callback saves the checkpoint at the end of each epoch</param>
396+
/// <param name="delete_checkpoint">Boolean, default to True. This BackupAndRestore callback works by saving a checkpoint to back up the training state</param>
397+
/// <param name="save_before_preemption">A boolean value instructing whether to turn on the automatic checkpoint saving for preemption/maintenance events. </param>
398+
public BackupAndRestore(string backup_dir, string save_freq = "epoch", bool delete_checkpoint = true, bool save_before_preemption = false)
399+
{
400+
Parameters["backup_dir"] = backup_dir;
401+
Parameters["save_freq"] = save_freq;
402+
Parameters["delete_checkpoint"] = delete_checkpoint;
403+
Parameters["save_before_preemption"] = save_before_preemption;
404+
405+
PyInstance = Instance.keras.callbacks.BackupAndRestore;
406+
Init();
407+
}
408+
}
409+
410+
/// <summary>
411+
/// Container abstracting a list of callbacks.
412+
/// </summary>
413+
public class CallbackList : Callback
414+
{
415+
/// <summary>
416+
/// Initializes a new instance of the <see cref="CallbackList" /> class.
417+
/// </summary>
418+
/// <param name="callbacks">List of Callback instances.</param>
419+
/// <param name="add_history">Whether a History callback should be added, if one does not already exist in the callbacks list.</param>
420+
/// <param name="add_progbar">Whether a ProgbarLogger callback should be added, if one does not already exist in the callbacks list.</param>
421+
/// <param name="model">The Model these callbacks are used with.</param>
422+
public CallbackList(List<Callback> callbacks, bool add_history = false, bool add_progbar = false, BaseModel model = null)
423+
{
424+
Parameters["callbacks"] = callbacks;
425+
Parameters["add_history"] = add_history;
426+
Parameters["add_progbar"] = add_progbar;
427+
Parameters["model"] = model.ToPython();
428+
429+
PyInstance = Instance.keras.callbacks.CallbackList;
430+
Init();
431+
}
432+
}
383433
}

0 commit comments

Comments (0)