Skip to content

Commit fd8bdf0

Browse files
authored
Merge pull request #43 from ENSTA-U2IS/dev
🐛 Fix docs & Add docs check before merge
2 parents 3173398 + eff523e commit fd8bdf0

File tree

5 files changed

+17
-9
lines changed

5 files changed

+17
-9
lines changed

.github/workflows/build-docs.yml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,9 @@ on:
33
push:
44
branches:
55
- main
6+
pull_request:
7+
branches:
8+
- main
69
schedule:
710
- cron: "00 12 * * 0" # Every Sunday noon (preserve the cache folders)
811
workflow_dispatch:
@@ -61,6 +64,7 @@ jobs:
6164
6265
- name: Deploy
6366
uses: peaceiris/actions-gh-pages@v3
67+
if: ${{ github.event_name != 'pull_request' }}
6468
with:
6569
deploy_key: ${{ secrets.ACTIONS_DEPLOY_KEY }}
6670
external_repository: torch-uncertainty/torch-uncertainty.github.io

.github/workflows/run-tests.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,9 @@ on:
66
- main
77
- dev
88
pull_request:
9+
branches:
10+
- main
11+
- dev
912
schedule:
1013
- cron: "42 7 * * 0"
1114
workflow_dispatch:

auto_tutorials_source/tutorial_bayesian.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -84,9 +84,10 @@ def optim_lenet(model: nn.Module) -> dict:
8484
# We mock the arguments for the trainer
8585
with ArgvContext(
8686
"file.py",
87-
"--max_epochs 1",
88-
"--enable_progress_bar=False",
89-
"--verbose=False",
87+
"--max_epochs",
88+
"1",
89+
"--enable_progress_bar",
90+
"False",
9091
):
9192
args = init_args(datamodule=MNISTDataModule)
9293

auto_tutorials_source/tutorial_scaler.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -71,15 +71,16 @@
7171
#
7272
# When computing the ECE, you need to provide the likelihoods associated with the inputs.
7373
# To do this, just call PyTorch's softmax.
74+
#
75+
# To avoid lengthy computations (without GPU), we restrict the calibration computation to a subset
76+
# of the test set.
7477

7578
from torch.utils.data import DataLoader, random_split
7679

7780
# Split datasets
7881
dataset = dm.test
79-
cal_dataset, test_dataset = random_split(dataset, [1000, len(dataset) - 1000])
80-
cal_dataloader, test_dataloader = DataLoader(cal_dataset, batch_size=32), DataLoader(
81-
test_dataset, batch_size=32
82-
)
82+
cal_dataset, test_dataset, other = random_split(dataset, [1000, 1000, len(dataset) - 2000])
83+
test_dataloader = DataLoader(test_dataset, batch_size=32)
8384

8485
# Initialize the ECE
8586
ece = CalibrationError(task="multiclass", num_classes=100)
@@ -105,7 +106,7 @@
105106

106107
# Fit the scaler on the calibration dataset
107108
scaler = TemperatureScaler()
108-
scaler = scaler.fit(model=model, calib_loader=cal_dataloader)
109+
scaler = scaler.fit(model=model, calibration_set=cal_dataset)
109110

110111
# %%
111112
# 6. Iterating Again to Compute the Improved ECE

docs/source/api.rst

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -194,7 +194,6 @@ Metrics
194194
BrierScore
195195
Disagreement
196196
Entropy
197-
JensenShannonDivergence
198197
MutualInformation
199198
NegativeLogLikelihood
200199

0 commit comments

Comments (0)