
Commit 693b634

Merge pull request #19 from axelparmentier/giom
Prepare for v0.3.0
2 parents ddc1f6a + b179f16

File tree

11 files changed: +31 −36 lines


CITATION.bib

Lines changed: 2 additions & 2 deletions
@@ -2,7 +2,7 @@ @misc{InferOpt.jl
   author = {Guillaume Dalle, Léo Baty, Louis Bouvier and Axel Parmentier},
   title = {InferOpt.jl},
   url = {https://github.com/axelparmentier/InferOpt.jl},
-  version = {v0.2.0},
+  version = {v0.3.0},
   year = {2022},
-  month = {6}
+  month = {7}
 }

Project.toml

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 name = "InferOpt"
 uuid = "4846b161-c94e-4150-8dac-c7ae193c601f"
 authors = ["Guillaume Dalle", "Léo Baty", "Louis Bouvier", "Axel Parmentier"]
-version = "0.2.0"
+version = "0.3.0"
 
 [deps]
 ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"

docs/src/tutorial.md

Lines changed: 1 addition & 1 deletion
@@ -121,7 +121,7 @@ Thanks to this smoothing, we can now train our model with a standard gradient op
 
 ````@example tutorial
 encoder = deepcopy(initial_encoder)
-opt = ADAM();
+opt = Adam();
 losses = Float64[]
 for epoch in 1:200
     l = 0.0
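
Context for this change: Flux.jl renamed the `ADAM` optimizer to `Adam` (around Flux 0.13), and the tutorial follows suit. A minimal, self-contained sketch of the updated training setup; the encoder and loss here are hypothetical stand-ins for the tutorial's, not InferOpt's actual API:

```julia
using Flux

# Hypothetical stand-ins for the tutorial's encoder and loss.
initial_encoder = Chain(Dense(5 => 1; bias=false))
toy_loss(m) = sum(abs2, m(ones(Float32, 5)))

encoder = deepcopy(initial_encoder)
opt = Adam()  # was `ADAM()` before the Flux rename
losses = Float64[]
for epoch in 1:200
    l = toy_loss(encoder)
    gs = Flux.gradient(() -> toy_loss(encoder), Flux.params(encoder))
    Flux.update!(opt, Flux.params(encoder), gs)
    push!(losses, l)
end
```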

src/InferOpt.jl

Lines changed: 4 additions & 3 deletions
@@ -16,7 +16,7 @@ using StatsBase: StatsBase, sample
 using Test
 
 include("utils/probability_distribution.jl")
-include("utils/composition.jl")
+include("utils/pushforward.jl")
 
 include("interpolation/interpolation.jl")
 
@@ -42,8 +42,9 @@ include("ssvm/isbaseloss.jl")
 include("ssvm/zeroone_baseloss.jl")
 include("ssvm/ssvm_loss.jl")
 
-export FixedAtomsProbabilityDistribution, sample, compute_expectation
-export ProbabilisticComposition
+export FixedAtomsProbabilityDistribution
+export compute_expectation, compress_distribution!
+export Pushforward
 export compute_probability_distribution
 
 export Interpolation

src/utils/probability_distribution.jl

Lines changed: 4 additions & 2 deletions
@@ -46,11 +46,13 @@ end
 Base.rand(probadist::FixedAtomsProbabilityDistribution) = rand(GLOBAL_RNG, probadist)
 
 """
-    compress!(probadist[; atol])
+    compress_distribution!(probadist[; atol])
 
 Remove duplicated atoms in `probadist` (up to a tolerance on equality).
 """
-function compress!(probadist::FixedAtomsProbabilityDistribution{A,W}; atol=0) where {A,W}
+function compress_distribution!(
+    probadist::FixedAtomsProbabilityDistribution{A,W}; atol=0
+) where {A,W}
     (; atoms, weights) = probadist
     to_delete = Int[]
     for i in length(probadist):-1:1
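
To illustrate the rename, a minimal sketch; the positional constructor `FixedAtomsProbabilityDistribution(atoms, weights)` is assumed from the struct's fields, and the numbers are made up:

```julia
using InferOpt

# Three atoms, two of which are identical and should be merged.
atoms = [[1.0, 0.0], [1.0, 0.0], [0.0, 1.0]]
weights = [0.25, 0.25, 0.5]
probadist = FixedAtomsProbabilityDistribution(atoms, weights)  # assumed constructor

compress_distribution!(probadist; atol=1e-8)  # was `compress!` before this release
length(probadist)  # expected: 2 atoms, with weight 0.5 merged onto [1.0, 0.0]
```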

src/utils/composition.jl renamed to src/utils/pushforward.jl

Lines changed: 8 additions & 10 deletions
@@ -1,24 +1,24 @@
 """
-    ProbabilisticComposition{L,G}
+    Pushforward{L,G}
 
 Differentiable composition of a probabilistic `layer` with an arbitrary function `post_processing`.
 
-`ProbabilisticComposition` can be used for direct regret minimization (aka learning by experience) when the post-processing returns a cost.
+`Pushforward` can be used for direct regret minimization (aka learning by experience) when the post-processing returns a cost.
 
 # Fields
 - `layer::L`: anything that implements `compute_probability_distribution(layer, θ; kwargs...)`
 - `post_processing::P`: callable
 
 See also: [`FixedAtomsProbabilityDistribution`](@ref).
 """
-struct ProbabilisticComposition{L,P}
+struct Pushforward{L,P}
     layer::L
     post_processing::P
 end
 
-function Base.show(io::IO, composition::ProbabilisticComposition)
+function Base.show(io::IO, composition::Pushforward)
     (; layer, post_processing) = composition
-    return print(io, "ProbabilisticComposition($layer, $post_processing)")
+    return print(io, "Pushforward($layer, $post_processing)")
 end
 
 """
@@ -30,25 +30,23 @@ This function is not differentiable if `composition.post_processing` isn't.
 
 See also: [`apply_on_atoms`](@ref).
 """
-function compute_probability_distribution(
-    composition::ProbabilisticComposition, θ; kwargs...
-)
+function compute_probability_distribution(composition::Pushforward, θ; kwargs...)
     (; layer, post_processing) = composition
     probadist = compute_probability_distribution(layer, θ; kwargs...)
     post_processed_probadist = apply_on_atoms(post_processing, probadist; kwargs...)
     return post_processed_probadist
 end
 
 """
-    (composition::ProbabilisticComposition)(θ)
+    (composition::Pushforward)(θ)
 
 Output the expectation of `composition.post_processing(X)`, where `X` follows the distribution defined by `composition.layer` applied to `θ`.
 
 Unlike [`compute_probability_distribution(composition, θ)`](@ref), this function is differentiable, even if `composition.post_processing` isn't.
 
 See also: [`compute_expectation`](@ref).
 """
-function (composition::ProbabilisticComposition)(θ::AbstractArray{<:Real}; kwargs...)
+function (composition::Pushforward)(θ::AbstractArray{<:Real}; kwargs...)
     (; layer, post_processing) = composition
     probadist = compute_probability_distribution(layer, θ; kwargs...)
     return compute_expectation(probadist, post_processing; kwargs...)
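
For reference, `Pushforward` is a drop-in rename of `ProbabilisticComposition`; a minimal sketch of the learning-by-experience usage mirrored in the test updates below, where `true_maximizer` and `cost` are illustrative stand-ins:

```julia
using InferOpt

# Illustrative stand-ins for the test suite's maximizer and cost.
true_maximizer(θ; kwargs...) = float.(θ .== maximum(θ))  # one-hot argmax
cost(y; kwargs...) = -sum(y)                              # post-processing returning a cost

# Push the perturbed maximizer's distribution forward through `cost`.
loss = Pushforward(PerturbedAdditive(true_maximizer; ε=1.0, nb_samples=10), cost)

θ = randn(5)
loss(θ)  # expectation of `cost` over the perturbed atoms; differentiable w.r.t. θ
```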

test/argmax.jl

Lines changed: 3 additions & 5 deletions
@@ -77,21 +77,19 @@ pipelines_experience = [
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
-            PerturbedAdditive(true_maximizer; ε=1.0, nb_samples=10), cost
-        ),
+        loss=Pushforward(PerturbedAdditive(true_maximizer; ε=1.0, nb_samples=10), cost),
     ),
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
+        loss=Pushforward(
             PerturbedMultiplicative(true_maximizer; ε=1.0, nb_samples=10), cost
         ),
     ),
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
+        loss=Pushforward(
             RegularizedGeneric(true_maximizer, half_square_norm, identity), cost
         ),
     ),

test/paths.jl

Lines changed: 3 additions & 5 deletions
@@ -79,21 +79,19 @@ pipelines_experience = [
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
-            PerturbedAdditive(true_maximizer; ε=1.0, nb_samples=10), cost
-        ),
+        loss=Pushforward(PerturbedAdditive(true_maximizer; ε=1.0, nb_samples=10), cost),
     ),
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
+        loss=Pushforward(
             PerturbedMultiplicative(true_maximizer; ε=1.0, nb_samples=10), cost
         ),
     ),
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
+        loss=Pushforward(
             RegularizedGeneric(true_maximizer, half_square_norm, identity), cost
         ),
     ),

test/ranking.jl

Lines changed: 3 additions & 5 deletions
@@ -71,21 +71,19 @@ pipelines_experience = [
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
-            PerturbedAdditive(true_maximizer; ε=1.0, nb_samples=10), cost
-        ),
+        loss=Pushforward(PerturbedAdditive(true_maximizer; ε=1.0, nb_samples=10), cost),
     ),
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
+        loss=Pushforward(
             PerturbedMultiplicative(true_maximizer; ε=1.0, nb_samples=10), cost
         ),
     ),
     (
         encoder=encoder_factory(),
         maximizer=identity,
-        loss=ProbabilisticComposition(
+        loss=Pushforward(
             RegularizedGeneric(true_maximizer, half_square_norm, identity), cost
         ),
     ),

test/tutorial.jl

Lines changed: 1 addition & 1 deletion
@@ -111,7 +111,7 @@ Thanks to this smoothing, we can now train our model with a standard gradient op
 =#
 
 encoder = deepcopy(initial_encoder)
-opt = ADAM();
+opt = Adam();
 losses = Float64[]
 for epoch in 1:200
     l = 0.0

0 commit comments
