
Commit 8d80f1c

Start clang tidy testing (#27)
* Apply some clang tidy findings
* Update gitignore
* Remaining easy cleanup
* Fix vcpkg version
1 parent 791677d commit 8d80f1c

18 files changed: +77 −181 lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
@@ -6,6 +6,7 @@ __pycache__
 .venv
 .env
 .nox
+.DS_Store
 
 setup.log
 install_KLU_Sundials

src/pybammsolvers/idaklu_source/Expressions/Base/Expression.hpp

Lines changed: 0 additions & 2 deletions
@@ -3,8 +3,6 @@
 
 #include "ExpressionTypes.hpp"
 #include "../../common.hpp"
-#include "../../Options.hpp"
-#include <memory>
 #include <vector>
 
 class Expression {

src/pybammsolvers/idaklu_source/Expressions/Base/ExpressionSet.hpp

Lines changed: 0 additions & 2 deletions
@@ -1,11 +1,9 @@
 #ifndef PYBAMM_IDAKLU_EXPRESSION_SET_HPP
 #define PYBAMM_IDAKLU_EXPRESSION_SET_HPP
 
-#include "ExpressionTypes.hpp"
 #include "Expression.hpp"
 #include "../../common.hpp"
 #include "../../Options.hpp"
-#include <memory>
 
 template <class T>
 class ExpressionSet

src/pybammsolvers/idaklu_source/Expressions/Casadi/CasadiFunctions.hpp

Lines changed: 0 additions & 2 deletions
@@ -5,8 +5,6 @@
 #include "../Expressions.hpp"
 #include <casadi/casadi.hpp>
 #include <casadi/core/function.hpp>
-#include <casadi/core/sparsity.hpp>
-#include <memory>
 
 /**
  * @brief Class for handling individual casadi functions

src/pybammsolvers/idaklu_source/IDAKLUSolverGroup.cpp

Lines changed: 4 additions & 2 deletions
@@ -45,13 +45,15 @@ std::vector<Solution> IDAKLUSolverGroup::solve(
     throw std::invalid_argument(
       "t_eval must have at least 2 entries"
     );
-  } else if (save_interp_steps) {
+  }
+  if (save_interp_steps) {
     if (t_interp.front() < t_eval.front()) {
       throw std::invalid_argument(
         "t_interp values must be greater than the smallest t_eval value: "
         + std::to_string(t_eval.front())
       );
-    } else if (t_interp.back() > t_eval.back()) {
+    }
+    if (t_interp.back() > t_eval.back()) {
       throw std::invalid_argument(
         "t_interp values must be less than the greatest t_eval value: "
        + std::to_string(t_eval.back())
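
The hunk above replaces an else-if chain with independent checks; since every branch ends by throwing, the else carried no control flow and clang-tidy's readability style prefers flat checks. A minimal standalone sketch of the same validation structure follows — the function name check_t_interp is illustrative, not taken from the repo, and the non-empty test on t_interp stands in for the save_interp_steps flag:

#include <stdexcept>
#include <string>
#include <vector>

// Hedged sketch: independent if-checks that each throw, mirroring the style
// adopted in IDAKLUSolverGroup::solve above.
void check_t_interp(const std::vector<double> &t_eval,
                    const std::vector<double> &t_interp) {
  if (t_eval.size() < 2) {
    throw std::invalid_argument("t_eval must have at least 2 entries");
  }
  if (!t_interp.empty() && t_interp.front() < t_eval.front()) {
    throw std::invalid_argument(
      "t_interp values must be greater than the smallest t_eval value: "
      + std::to_string(t_eval.front()));
  }
  if (!t_interp.empty() && t_interp.back() > t_eval.back()) {
    throw std::invalid_argument(
      "t_interp values must be less than the greatest t_eval value: "
      + std::to_string(t_eval.back()));
  }
}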

src/pybammsolvers/idaklu_source/IDAKLUSolverOpenMP.hpp

Lines changed: 0 additions & 1 deletion
@@ -8,7 +8,6 @@ using std::vector;
 
 #include "Options.hpp"
 #include "Solution.hpp"
-#include "sundials_legacy_wrapper.hpp"
 
 /**
  * @brief Abstract solver class based on OpenMP vectors

src/pybammsolvers/idaklu_source/IDAKLUSolverOpenMP.inl

Lines changed: 2 additions & 0 deletions
@@ -1,3 +1,5 @@
+#pragma once
+
 #include "Expressions/Expressions.hpp"
 #include "sundials_functions.hpp"
 #include <vector>
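
Adding #pragma once guards the .inl, which holds template definitions that a header pulls in; if the file is ever included directly as well as through that header, the guard prevents the definitions from being seen twice in one translation unit. A minimal sketch of the pattern, with purely illustrative file and type names:

// --- Example.hpp (illustrative) ---
// Declares the template and includes its out-of-line definitions.
#pragma once
template <class T>
struct Example {
  T value;
  T get() const;
};
#include "Example.inl"

// --- Example.inl (illustrative) ---
// Guarded so a direct include cannot redefine Example<T>::get in the same
// translation unit.
#pragma once
template <class T>
T Example<T>::get() const { return value; }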

src/pybammsolvers/idaklu_source/IdakluJax.cpp

Lines changed: 40 additions & 42 deletions
@@ -1,13 +1,7 @@
 #include "IdakluJax.hpp"
-
-#include <pybind11/functional.h>
 #include <pybind11/numpy.h>
-#include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
 #include <pybind11/stl_bind.h>
-
-#include <vector>
-#include <iostream>
 #include <functional>
 
 // Initialise static variable
@@ -18,7 +12,7 @@ std::map<std::int64_t, IdakluJax*> idaklu_jax_instances;
 
 // Create a new IdakluJax object, assign identifier, add to the objects list and return as pointer
 IdakluJax *create_idaklu_jax() {
-  IdakluJax *p = new IdakluJax();
+  auto *p = new IdakluJax();
   idaklu_jax_instances[p->get_index()] = p;
   return p;
 }
@@ -32,21 +26,21 @@ IdakluJax::~IdakluJax() {
 }
 
 void IdakluJax::register_callback_eval(CallbackEval h) {
-  callback_eval = h;
+  callback_eval = std::move(h);
 }
 
 void IdakluJax::register_callback_jvp(CallbackJvp h) {
-  callback_jvp = h;
+  callback_jvp = std::move(h);
 }
 
 void IdakluJax::register_callback_vjp(CallbackVjp h) {
-  callback_vjp = h;
+  callback_vjp = std::move(h);
 }
 
 void IdakluJax::register_callbacks(CallbackEval h_eval, CallbackJvp h_jvp, CallbackVjp h_vjp) {
-  register_callback_eval(h_eval);
-  register_callback_jvp(h_jvp);
-  register_callback_vjp(h_vjp);
+  register_callback_eval(std::move(h_eval));
+  register_callback_jvp(std::move(h_jvp));
+  register_callback_vjp(std::move(h_vjp));
 }
 
 void IdakluJax::cpu_idaklu_eval(void *out_tuple, const void **in) {
@@ -55,10 +49,11 @@ void IdakluJax::cpu_idaklu_eval(void *out_tuple, const void **in) {
   const std::int64_t n_t = *reinterpret_cast<const std::int64_t *>(in[k++]);
   const std::int64_t n_vars = *reinterpret_cast<const std::int64_t *>(in[k++]);
   const std::int64_t n_inputs = *reinterpret_cast<const std::int64_t *>(in[k++]);
-  const realtype *t = reinterpret_cast<const realtype *>(in[k++]);
-  realtype *inputs = new realtype(n_inputs);
-  for (int i = 0; i < n_inputs; i++)
+  const auto *t = reinterpret_cast<const realtype *>(in[k++]);
+  auto *inputs = new realtype(n_inputs);
+  for (int i = 0; i < n_inputs; i++) {
     inputs[i] = reinterpret_cast<const realtype *>(in[k++])[0];
+  }
   void *out = reinterpret_cast<realtype *>(out_tuple);
 
   // Log
@@ -75,16 +70,16 @@ void IdakluJax::cpu_idaklu_eval(void *out_tuple, const void **in) {
   PyGILState_STATE state = PyGILState_Ensure();
 
   // Convert time vector to an np_array
-  py::capsule t_capsule(t, "t_capsule");
-  np_array t_np = np_array({n_t}, {sizeof(realtype)}, t, t_capsule);
+  const py::capsule t_capsule(t, "t_capsule");
+  const auto t_np = np_array({n_t}, {sizeof(realtype)}, t, t_capsule);
 
   // Convert inputs to an np_array
-  py::capsule in_capsule(inputs, "in_capsule");
-  np_array in_np = np_array({n_inputs}, {sizeof(realtype)}, inputs, in_capsule);
+  const py::capsule in_capsule(inputs, "in_capsule");
+  const auto in_np = np_array({n_inputs}, {sizeof(realtype)}, inputs, in_capsule);
 
   // Call solve function in python to obtain an np_array
-  np_array out_np = callback_eval(t_np, in_np);
-  auto out_buf = out_np.request();
+  const np_array out_np = callback_eval(t_np, in_np);
+  const auto out_buf = out_np.request();
   const realtype *out_ptr = reinterpret_cast<realtype *>(out_buf.ptr);
 
   // Arrange into 'out' array
@@ -100,14 +95,16 @@ void IdakluJax::cpu_idaklu_jvp(void *out_tuple, const void **in) {
   const std::int64_t n_t = *reinterpret_cast<const std::int64_t *>(in[k++]);
   const std::int64_t n_vars = *reinterpret_cast<const std::int64_t *>(in[k++]);
   const std::int64_t n_inputs = *reinterpret_cast<const std::int64_t *>(in[k++]);
-  const realtype *primal_t = reinterpret_cast<const realtype *>(in[k++]);
-  realtype *primal_inputs = new realtype(n_inputs);
-  for (int i = 0; i < n_inputs; i++)
+  const auto *primal_t = reinterpret_cast<const realtype *>(in[k++]);
+  auto *primal_inputs = new realtype(n_inputs);
+  for (int i = 0; i < n_inputs; i++) {
     primal_inputs[i] = reinterpret_cast<const realtype *>(in[k++])[0];
-  const realtype *tangent_t = reinterpret_cast<const realtype *>(in[k++]);
-  realtype *tangent_inputs = new realtype(n_inputs);
-  for (int i = 0; i < n_inputs; i++)
+  }
+  const auto *tangent_t = reinterpret_cast<const realtype *>(in[k++]);
+  auto *tangent_inputs = new realtype(n_inputs);
+  for (int i = 0; i < n_inputs; i++) {
     tangent_inputs[i] = reinterpret_cast<const realtype *>(in[k++])[0];
+  }
   void *out = reinterpret_cast<realtype *>(out_tuple);
 
   // Log
@@ -125,8 +122,8 @@ void IdakluJax::cpu_idaklu_jvp(void *out_tuple, const void **in) {
   PyGILState_STATE state = PyGILState_Ensure();
 
   // Form primals time vector as np_array
-  py::capsule primal_t_capsule(primal_t, "primal_t_capsule");
-  np_array primal_t_np = np_array(
+  const py::capsule primal_t_capsule(primal_t, "primal_t_capsule");
+  const auto primal_t_np = np_array(
     {n_t},
     {sizeof(realtype)},
     primal_t,
@@ -135,25 +132,25 @@ void IdakluJax::cpu_idaklu_jvp(void *out_tuple, const void **in) {
 
   // Pack primals as np_array
   py::capsule primal_inputs_capsule(primal_inputs, "primal_inputs_capsule");
-  np_array primal_inputs_np = np_array(
+  const auto primal_inputs_np = np_array(
     {n_inputs},
     {sizeof(realtype)},
     primal_inputs,
     primal_inputs_capsule
   );
 
   // Form tangents time vector as np_array
-  py::capsule tangent_t_capsule(tangent_t, "tangent_t_capsule");
-  np_array tangent_t_np = np_array(
+  const py::capsule tangent_t_capsule(tangent_t, "tangent_t_capsule");
+  const auto tangent_t_np = np_array(
     {n_t},
     {sizeof(realtype)},
     tangent_t,
     tangent_t_capsule
   );
 
   // Pack tangents as np_array
-  py::capsule tangent_inputs_capsule(tangent_inputs, "tangent_inputs_capsule");
-  np_array tangent_inputs_np = np_array(
+  const py::capsule tangent_inputs_capsule(tangent_inputs, "tangent_inputs_capsule");
+  const auto tangent_inputs_np = np_array(
     {n_inputs},
     {sizeof(realtype)},
     tangent_inputs,
@@ -165,7 +162,7 @@ void IdakluJax::cpu_idaklu_jvp(void *out_tuple, const void **in) {
     primal_t_np, primal_inputs_np,
     tangent_t_np, tangent_inputs_np
   );
-  auto buf = y_dot.request();
+  const auto buf = y_dot.request();
   const realtype *ptr = reinterpret_cast<realtype *>(buf.ptr);
 
   // Arrange into 'out' array
@@ -182,13 +179,14 @@ void IdakluJax::cpu_idaklu_vjp(void *out_tuple, const void **in) {
   const std::int64_t n_y_bar0 = *reinterpret_cast<const std::int64_t *>(in[k++]);
   const std::int64_t n_y_bar1 = *reinterpret_cast<const std::int64_t *>(in[k++]);
   const std::int64_t n_y_bar = (n_y_bar1 > 0) ? (n_y_bar0*n_y_bar1) : n_y_bar0;
-  const realtype *y_bar = reinterpret_cast<const realtype *>(in[k++]);
-  const std::int64_t *invar = reinterpret_cast<const std::int64_t *>(in[k++]);
-  const realtype *t = reinterpret_cast<const realtype *>(in[k++]);
-  realtype *inputs = new realtype(n_inputs);
-  for (int i = 0; i < n_inputs; i++)
+  const auto *y_bar = reinterpret_cast<const realtype *>(in[k++]);
+  const auto *invar = reinterpret_cast<const std::int64_t *>(in[k++]);
+  const auto *t = reinterpret_cast<const realtype *>(in[k++]);
+  auto *inputs = new realtype(n_inputs);
+  for (int i = 0; i < n_inputs; i++) {
     inputs[i] = reinterpret_cast<const realtype *>(in[k++])[0];
-  realtype *out = reinterpret_cast<realtype *>(out_tuple);
+  }
+  auto *out = reinterpret_cast<realtype *>(out_tuple);
 
   // Log
   DEBUG("cpu_idaklu_vjp");
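
Most of the edits above are mechanical clang-tidy fixes: auto for the results of new and reinterpret_cast, braces around single-statement for bodies, and std::move on the by-value std::function parameters so registering a callback moves the handle into the member instead of copying it. A minimal sketch of that last pattern follows — CallbackHolder and the double(double) signature are illustrative, not the CallbackEval/CallbackJvp/CallbackVjp types from the repo:

#include <functional>
#include <utility>

// Hedged sketch of the pass-by-value-then-move callback registration pattern.
class CallbackHolder {
public:
  // Taking std::function by value lets callers pass lvalues (copied once) or
  // rvalues (moved); std::move then transfers the parameter into the member
  // instead of copying it a second time.
  void register_callback(std::function<double(double)> h) {
    callback_ = std::move(h);
  }

  double call(double x) const { return callback_(x); }

private:
  std::function<double(double)> callback_;
};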

src/pybammsolvers/idaklu_source/Solution.hpp

Lines changed: 1 addition & 1 deletion
@@ -36,4 +36,4 @@ class Solution
   np_array y_term;
 };
 
-#endif // PYBAMM_IDAKLU_COMMON_HPP
+#endif // PYBAMM_IDAKLU_SOLUTION_HPP
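
This hunk only brings the #endif comment in line with the header's own guard macro. The opening lines are not shown here, but the usual shape of the guard is sketched below, with the macro name assumed from the corrected comment:

#ifndef PYBAMM_IDAKLU_SOLUTION_HPP
#define PYBAMM_IDAKLU_SOLUTION_HPP

// ... class Solution declaration ...

#endif // PYBAMM_IDAKLU_SOLUTION_HPP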

src/pybammsolvers/idaklu_source/SolutionData.cpp

Lines changed: 6 additions & 6 deletions
@@ -4,7 +4,7 @@ Solution SolutionData::generate_solution() {
   py::capsule free_t_when_done(
     t_return,
     [](void *f) {
-      realtype *vect = reinterpret_cast<realtype *>(f);
+      auto *vect = reinterpret_cast<realtype *>(f);
       delete[] vect;
     }
   );
@@ -18,7 +18,7 @@ Solution SolutionData::generate_solution() {
   py::capsule free_y_when_done(
     y_return,
     [](void *f) {
-      realtype *vect = reinterpret_cast<realtype *>(f);
+      auto *vect = reinterpret_cast<realtype *>(f);
       delete[] vect;
     }
   );
@@ -32,7 +32,7 @@ Solution SolutionData::generate_solution() {
   py::capsule free_yp_when_done(
     yp_return,
     [](void *f) {
-      realtype *vect = reinterpret_cast<realtype *>(f);
+      auto *vect = reinterpret_cast<realtype *>(f);
       delete[] vect;
     }
   );
@@ -46,7 +46,7 @@ Solution SolutionData::generate_solution() {
   py::capsule free_yS_when_done(
     yS_return,
     [](void *f) {
-      realtype *vect = reinterpret_cast<realtype *>(f);
+      auto *vect = reinterpret_cast<realtype *>(f);
       delete[] vect;
     }
   );
@@ -64,7 +64,7 @@ Solution SolutionData::generate_solution() {
   py::capsule free_ypS_when_done(
     ypS_return,
     [](void *f) {
-      realtype *vect = reinterpret_cast<realtype *>(f);
+      auto *vect = reinterpret_cast<realtype *>(f);
       delete[] vect;
     }
   );
@@ -83,7 +83,7 @@ Solution SolutionData::generate_solution() {
   py::capsule free_yterm_when_done(
     yterm_return,
     [](void *f) {
-      realtype *vect = reinterpret_cast<realtype *>(f);
+      auto *vect = reinterpret_cast<realtype *>(f);
       delete[] vect;
     }
   );
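
Each hunk above only swaps the spelled-out realtype * for auto * inside the capsule deleters; the surrounding pattern is pybind11's way of handing a heap buffer to NumPy and freeing it once Python drops the last reference to the array. A minimal standalone sketch of that pattern follows — wrap_buffer and the plain double element type are illustrative; the repo uses realtype and its np_array alias:

#include <pybind11/numpy.h>

namespace py = pybind11;

// Hedged sketch: wrap a heap-allocated buffer in a NumPy array whose capsule
// deleter releases the memory when the array is garbage-collected.
py::array_t<double> wrap_buffer(double *data, py::ssize_t n) {
  py::capsule free_when_done(data, [](void *f) {
    auto *vect = reinterpret_cast<double *>(f);
    delete[] vect;
  });
  return py::array_t<double>({n}, {sizeof(double)}, data, free_when_done);
}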
