@@ -12,11 +12,9 @@ of size `(num_features, num_nodes)`.
 - `bias`: Add learnable bias.
 - `init`: Weights' initializer.

-# Example
+# Examples

 ```jldoctest
-julia> using GeometricFlux, Flux
-
 julia> gc = GCNConv(1024=>256, relu)
 GCNConv(1024 => 256, relu)
 ```
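For orientation, here is a minimal end-to-end sketch of the calling convention this commit moves toward: the graph lives in a `FeaturedGraph` and `WithGraph` binds the layer to it, so only the node-feature matrix of size `(num_features, num_nodes)` is passed at call time. The adjacency matrix, the sizes, and the assumption that a `WithGraph`-wrapped layer is directly callable on features are illustrative, not part of this diff.

```julia
using GeometricFlux, Flux

# A small 3-node path graph; FeaturedGraph holds the static structure.
adj = [0 1 0;
       1 0 1;
       0 1 0]
fg = FeaturedGraph(adj)

# Bind the layer to the graph, then apply it to a (num_features, num_nodes) matrix.
layer = WithGraph(fg, GCNConv(8=>4, relu))
X = rand(Float32, 8, 3)
Y = layer(X)   # expected size (4, 3)
```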
@@ -83,7 +81,7 @@ Chebyshev spectral graph convolutional layer.
 - `bias`: Add learnable bias.
 - `init`: Weights' initializer.

-# Example
+# Examples

 ```jldoctest
 julia> cc = ChebConv(1024=>256, 5, relu)
@@ -107,6 +105,8 @@ function ChebConv(ch::Pair{Int,Int}, k::Int, σ=identity;
     ChebConv(W, b, k, σ)
 end

+@deprecate ChebConv(fg, args...; kwargs...) WithGraph(fg, ChebConv(args...; kwargs...))
+
 @functor ChebConv

 Flux.trainable(l::ChebConv) = (l.weight, l.bias)
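The `@deprecate` added here (and the analogous ones in the hunks below) rewrites the old graph-in-constructor form into the `WithGraph` wrapper. A hedged before/after sketch, with `fg` a `FeaturedGraph` and the layer arguments chosen purely for illustration:

```julia
# Old form (now deprecated): the graph was passed to the layer constructor.
cc = ChebConv(fg, 1024=>256, 5, relu)

# Replacement suggested by the @deprecate rule above.
cc = WithGraph(fg, ChebConv(1024=>256, 5, relu))
```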
@@ -167,6 +167,18 @@ Graph neural network layer.
   `*`, `/`, `max`, `min` and `mean` are available.
 - `bias`: Add learnable bias.
 - `init`: Weights' initializer.
+
+# Examples
+
+```jldoctest
+julia> GraphConv(1024=>256, relu)
+GraphConv(1024 => 256, relu, aggr=+)
+
+julia> GraphConv(1024=>256, relu, *)
+GraphConv(1024 => 256, relu, aggr=*)
+```
+
+See also [`WithGraph`](@ref) for training the layer with a static graph.
 """
 struct GraphConv{A<:AbstractMatrix,B,F,O} <: MessagePassing
     weight1::A
@@ -234,6 +248,24 @@ Graph attentional layer.
 - `heads`: Number of attention heads.
 - `concat`: Concatenate layer output or not. If not, layer output is averaged.
 - `negative_slope::Real`: Keyword argument, the parameter of LeakyReLU.
+
+# Examples
+
+```jldoctest
+julia> GATConv(1024=>256, relu)
+GATConv(1024=>256, heads=1, concat=true, LeakyReLU(λ=0.2))
+
+julia> GATConv(1024=>256, relu, heads=4)
+GATConv(1024=>1024, heads=4, concat=true, LeakyReLU(λ=0.2))
+
+julia> GATConv(1024=>256, relu, heads=4, concat=false)
+GATConv(1024=>1024, heads=4, concat=false, LeakyReLU(λ=0.2))
+
+julia> GATConv(1024=>256, relu, negative_slope=0.1f0)
+GATConv(1024=>256, heads=1, concat=true, LeakyReLU(λ=0.1))
+```
+
+See also [`WithGraph`](@ref) for training the layer with a static graph.
 """
 struct GATConv{T,A<:AbstractMatrix{T},B,F} <: MessagePassing
     weight::A
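A note on the doctest output above: with `heads=4` and `concat=true`, the head outputs are stacked along the output dimension of the weight, and the updated `Base.show` (later in this diff) reads the channel sizes back from that weight, which is why `GATConv(1024=>256, relu, heads=4)` prints as `GATConv(1024=>1024, ...)`. A small sketch of that bookkeeping, assuming the weight is stored with shape `(out * heads, in)`:

```julia
in_ch, out_ch, heads = 1024, 256, 4
W = zeros(Float32, out_ch * heads, in_ch)  # assumed layout of l.weight: heads stacked row-wise
# Base.show derives the printed channels from the weight's size:
size(W, ndims(W)), size(W, ndims(W) - 1)   # (1024, 1024), shown as "1024=>1024"
```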
@@ -255,6 +287,8 @@ function GATConv(ch::Pair{Int,Int}, σ=identity; heads::Int=1, concat::Bool=true
     GATConv(W, b, a, σ, negative_slope, ch, heads, concat)
 end

+@deprecate GATConv(fg, args...; kwargs...) WithGraph(fg, GATConv(args...; kwargs...))
+
 @functor GATConv

 Flux.trainable(l::GATConv) = (l.weight, l.bias, l.a)
@@ -342,6 +376,8 @@ function Base.show(io::IO, l::GATConv)
     in_channel = size(l.weight, ndims(l.weight))
     out_channel = size(l.weight, ndims(l.weight)-1)
     print(io, "GATConv(", in_channel, "=>", out_channel)
+    print(io, ", heads=", l.heads)
+    print(io, ", concat=", l.concat)
     print(io, ", LeakyReLU(λ=", l.negative_slope)
     print(io, "))")
 end
@@ -358,6 +394,18 @@ Gated graph convolution layer.
 - `num_layers`: The number of gated recurrent units.
 - `aggr`: An aggregate function applied to the result of message function. `+`, `-`,
   `*`, `/`, `max`, `min` and `mean` are available.
+
+# Examples
+
+```jldoctest
+julia> GatedGraphConv(256, 4)
+GatedGraphConv((256 => 256)^4, aggr=+)
+
+julia> GatedGraphConv(256, 4, aggr=*)
+GatedGraphConv((256 => 256)^4, aggr=*)
+```
+
+See also [`WithGraph`](@ref) for training the layer with a static graph.
 """
 struct GatedGraphConv{A<:AbstractArray{<:Number,3},R,O} <: MessagePassing
     weight::A
@@ -373,6 +421,8 @@ function GatedGraphConv(out_ch::Int, num_layers::Int; aggr=+, init=glorot_unifor
     GatedGraphConv(w, gru, out_ch, num_layers, aggr)
 end

+@deprecate GatedGraphConv(fg, args...; kwargs...) WithGraph(fg, GatedGraphConv(args...; kwargs...))
+
 @functor GatedGraphConv

 Flux.trainable(l::GatedGraphConv) = (l.weight, l.gru)
@@ -424,7 +474,20 @@ Edge convolutional layer.
 # Arguments

 - `nn`: A neural network (e.g. a Dense layer or an MLP).
-- `aggr`: An aggregate function applied to the result of message function. `+`, `max` and `mean` are available.
+- `aggr`: An aggregate function applied to the result of message function.
+  `+`, `max` and `mean` are available.
+
+# Examples
+
+```jldoctest
+julia> EdgeConv(Dense(1024, 256, relu))
+EdgeConv(Dense(1024, 256, relu), aggr=max)
+
+julia> EdgeConv(Dense(1024, 256, relu), aggr=+)
+EdgeConv(Dense(1024, 256, relu), aggr=+)
+```
+
+See also [`WithGraph`](@ref) for training the layer with a static graph.
 """
 struct EdgeConv{N,O} <: MessagePassing
     nn::N
@@ -433,6 +496,8 @@

 EdgeConv(nn; aggr=max) = EdgeConv(nn, aggr)

+@deprecate EdgeConv(fg, args...; kwargs...) WithGraph(fg, EdgeConv(args...; kwargs...))
+
 @functor EdgeConv

 Flux.trainable(l::EdgeConv) = (l.nn,)
@@ -470,8 +535,17 @@
 - `nn`: A neural network/layer.
 - `eps`: Weighting factor.

-The definition of this is as defined in the original paper,
-Xu et. al. (2018) https://arxiv.org/abs/1810.00826.
+# Examples
+
+```jldoctest
+julia> GINConv(Dense(1024, 256, relu))
+GINConv(Dense(1024, 256, relu), ϵ=0.0)
+
+julia> GINConv(Dense(1024, 256, relu), 1.f-6)
+GINConv(Dense(1024, 256, relu), ϵ=1.0e-6)
+```
+
+See also [`WithGraph`](@ref) for training the layer with a static graph.
 """
 struct GINConv{N,R<:Real} <: MessagePassing
     nn::N
@@ -480,6 +554,8 @@

 GINConv(nn, eps=0f0) = GINConv(nn, eps)

+@deprecate GINConv(fg, args...; kwargs...) WithGraph(fg, GINConv(args...; kwargs...))
+
 @functor GINConv

 Flux.trainable(g::GINConv) = (g.nn,)
@@ -502,19 +578,31 @@ function (l::GINConv)(el::NamedTuple, x::AbstractArray)
     return V
 end

+function Base.show(io::IO, l::GINConv)
+    print(io, "GINConv(", l.nn, ", ϵ=", l.eps, ")")
+end
+

 """
-    CGConv((node_dim, edge_dim), out, init, bias=true)
+    CGConv((node_dim, edge_dim), init, bias=true)

 Crystal Graph Convolutional network. Uses both node and edge features.

 # Arguments

 - `node_dim`: Dimensionality of the input node features; this is also the output dimensionality.
 - `edge_dim`: Dimensionality of the input edge features.
-- `out`: Dimensionality of the output features.
 - `init`: Initialization algorithm for each of the weight matrices
 - `bias`: Whether or not to learn an additive bias parameter.
+
+# Examples
+
+```jldoctest
+julia> CGConv((128, 32))
+CGConv(node dim=128, edge dim=32)
+```
+
+See also [`WithGraph`](@ref) for training the layer with a static graph.
 """
 struct CGConv{A<:AbstractMatrix,B} <: MessagePassing
     Wf::A
@@ -523,6 +611,8 @@ struct CGConv{A<:AbstractMatrix,B} <: MessagePassing
     bs::B
 end

+@deprecate CGConv(fg, args...; kwargs...) WithGraph(fg, CGConv(args...; kwargs...))
+
 @functor CGConv

 Flux.trainable(l::CGConv) = (l.Wf, l.Ws, l.bf, l.bs)
@@ -560,3 +650,9 @@ function (l::CGConv)(el::NamedTuple, X::AbstractArray, E::AbstractArray)
     _, V, _ = propagate(l, el, E, X, nothing, +, nothing, nothing)
     return V
 end
+
+function Base.show(io::IO, l::CGConv)
+    node_dim, d = size(l.Wf)
+    edge_dim = d - 2 * node_dim
+    print(io, "CGConv(node dim=", node_dim, ", edge dim=", edge_dim, ")")
+end
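On the dimension bookkeeping in this new `Base.show`: assuming `Wf` acts on the concatenation `[x_i; x_j; e_ij]` (as in the crystal-graph formulation), its second dimension is `2 * node_dim + edge_dim`, so the edge dimension can be recovered from the weight alone. A minimal check under that assumption:

```julia
node_dim, edge_dim = 128, 32
Wf = zeros(Float32, node_dim, 2 * node_dim + edge_dim)  # assumed shape of l.Wf

d = size(Wf, 2)                  # 288
d - 2 * node_dim == edge_dim     # true; this is what the show method prints as "edge dim"
```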