
Commit 3b8caa8

Move from hpx::lcos:: to hpx::
1 parent 4786500 commit 3b8caa8

5 files changed: +53 -53 lines changed

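The change is a pure rename: every hpx::lcos::future, hpx::lcos::shared_future, hpx::lcos::local::promise, hpx::lcos::make_ready_future and hpx::lcos::when_all becomes the corresponding top-level hpx:: (or hpx::local::) spelling, with no behavioral change. A minimal sketch of the new spellings, assuming an HPX release that already provides them; the include path and the helper names signal_ready/join_all are illustrative only, not part of this commit:

    // Sketch only: the renamed HPX facilities this commit switches to.
    // Assumes a recent HPX; the header path may differ between versions.
    #include <hpx/future.hpp>
    #include <vector>

    // was: hpx::lcos::future<void> / hpx::lcos::local::promise<void>
    hpx::future<void> signal_ready() {
      hpx::local::promise<void> ready_promise;
      hpx::future<void> ready_future = ready_promise.get_future();
      ready_promise.set_value();
      return ready_future;
    }

    // was: hpx::lcos::when_all(futs) / hpx::lcos::make_ready_future()
    hpx::future<void> join_all() {
      std::vector<hpx::future<void>> futs;
      futs.push_back(signal_ready());
      futs.push_back(hpx::make_ready_future());
      return hpx::when_all(futs).then([](auto &&) {});
    }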

include/cppuddle/kernel_aggregation/detail/aggregation_executor_pools.hpp

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@ class aggregation_pool {
       /* const size_t gpu_id = 1; */
       std::lock_guard<aggregation_mutex_t> guard(instance()[gpu_id].pool_mutex);
       assert(!instance()[gpu_id].aggregation_executor_pool.empty());
-      std::optional<hpx::lcos::future<
+      std::optional<hpx::future<
           typename aggregated_executor<Interface>::executor_slice>>
           ret;
       size_t local_id = (instance()[gpu_id].current_interface) %

include/cppuddle/kernel_aggregation/detail/aggregation_executors_and_allocators.hpp

Lines changed: 26 additions & 26 deletions
@@ -128,7 +128,7 @@ void exec_post_wrapper(Executor & exec, F &&f, Ts &&...ts) {
 }
 
 template <typename Executor, typename F, typename... Ts>
-hpx::lcos::future<void> exec_async_wrapper(Executor & exec, F &&f, Ts &&...ts) {
+hpx::future<void> exec_async_wrapper(Executor & exec, F &&f, Ts &&...ts) {
   return hpx::async(exec, std::forward<F>(f), std::forward<Ts>(ts)...);
 }
 
@@ -148,9 +148,9 @@ template <typename Executor> class aggregated_function_call {
   std::atomic<size_t> slice_counter = 0;
 
   /// Promise to be set when all slices have visited this function call
-  /* hpx::lcos::local::promise<void> slices_ready_promise; */
+  /* hpx::local::promise<void> slices_ready_promise; */
   /// Tracks if all slices have visited this function call
-  /* hpx::lcos::future<void> all_slices_ready = slices_ready_promise.get_future(); */
+  /* hpx::future<void> all_slices_ready = slices_ready_promise.get_future(); */
   /// How many slices can we expect?
   const size_t number_slices;
   const bool async_mode;
@@ -168,7 +168,7 @@ template <typename Executor> class aggregated_function_call {
   aggregation_mutex_t debug_mut;
 #endif
 
-  std::vector<hpx::lcos::local::promise<void>> potential_async_promises{};
+  std::vector<hpx::local::promise<void>> potential_async_promises{};
 
 public:
   aggregated_function_call(const size_t number_slices, bool async_mode, Executor &exec)
@@ -182,7 +182,7 @@ template <typename Executor> class aggregated_function_call {
     // assert(!all_slices_ready.valid());
   }
   /// Returns true if all required slices have visited this point
-  bool sync_aggregation_slices(hpx::lcos::future<void> &stream_future) {
+  bool sync_aggregation_slices(hpx::future<void> &stream_future) {
     assert(!async_mode);
     assert(potential_async_promises.empty());
     const size_t local_counter = slice_counter++;
@@ -192,7 +192,7 @@ template <typename Executor> class aggregated_function_call {
     else return false;
   }
   template <typename F, typename... Ts>
-  void post_when(hpx::lcos::future<void> &stream_future, F &&f, Ts &&...ts) {
+  void post_when(hpx::future<void> &stream_future, F &&f, Ts &&...ts) {
 #if !(defined(NDEBUG)) && defined(DEBUG_AGGREGATION_CALLS)
     // needed for concurrent access to function_tuple and debug_type_information
     // Not required for normal use
@@ -265,7 +265,7 @@ template <typename Executor> class aggregated_function_call {
     }
   }
   template <typename F, typename... Ts>
-  hpx::lcos::future<void> async_when(hpx::lcos::future<void> &stream_future,
+  hpx::future<void> async_when(hpx::future<void> &stream_future,
                                F &&f, Ts &&...ts) {
 #if !(defined(NDEBUG)) && defined(DEBUG_AGGREGATION_CALLS)
     // needed for concurrent access to function_tuple and debug_type_information
@@ -330,7 +330,7 @@ template <typename Executor> class aggregated_function_call {
     assert(local_counter < number_slices);
     assert(slice_counter < number_slices + 1);
     assert(potential_async_promises.size() == number_slices);
-    hpx::lcos::future<void> ret_fut =
+    hpx::future<void> ret_fut =
         potential_async_promises[local_counter].get_future();
     if (local_counter == number_slices - 1) {
       /* slices_ready_promise.set_value(); */
@@ -347,15 +347,15 @@ template <typename Executor> class aggregated_function_call {
     return ret_fut;
   }
   template <typename F, typename... Ts>
-  hpx::lcos::shared_future<void> wrap_async(hpx::lcos::future<void> &stream_future,
+  hpx::shared_future<void> wrap_async(hpx::future<void> &stream_future,
                                F &&f, Ts &&...ts) {
     assert(async_mode);
     assert(!potential_async_promises.empty());
     const size_t local_counter = slice_counter++;
     assert(local_counter < number_slices);
     assert(slice_counter < number_slices + 1);
     assert(potential_async_promises.size() == number_slices);
-    hpx::lcos::shared_future<void> ret_fut =
+    hpx::shared_future<void> ret_fut =
         potential_async_promises[local_counter].get_shared_future();
     if (local_counter == number_slices - 1) {
       auto fut = f(std::forward<Ts>(ts)...);
@@ -496,11 +496,11 @@ template <typename Executor> class aggregated_executor {
       launch_counter++;
     }
     template <typename F, typename... Ts>
-    hpx::lcos::future<void> async(F &&f, Ts &&...ts) {
+    hpx::future<void> async(F &&f, Ts &&...ts) {
       // we should only execute function calls once all slices
       // have been given away (-> Executor Slices start)
       assert(parent.slices_exhausted == true);
-      hpx::lcos::future<void> ret_fut = parent.async(
+      hpx::future<void> ret_fut = parent.async(
           launch_counter, std::forward<F>(f), std::forward<Ts>(ts)...);
       launch_counter++;
       return ret_fut;
@@ -525,11 +525,11 @@ template <typename Executor> class aggregated_executor {
     }
 
     template <typename F, typename... Ts>
-    hpx::lcos::shared_future<void> wrap_async(F &&f, Ts &&...ts) {
+    hpx::shared_future<void> wrap_async(F &&f, Ts &&...ts) {
      // we should only execute function calls once all slices
      // have been given away (-> Executor Slices start)
       assert(parent.slices_exhausted == true);
-      hpx::lcos::shared_future<void> ret_fut = parent.wrap_async(
+      hpx::shared_future<void> ret_fut = parent.wrap_async(
           launch_counter, std::forward<F>(f), std::forward<Ts>(ts)...);
       launch_counter++;
       return ret_fut;
@@ -557,10 +557,10 @@ template <typename Executor> class aggregated_executor {
 
   //===============================================================================
 
-  hpx::lcos::local::promise<void> slices_full_promise;
+  hpx::local::promise<void> slices_full_promise;
   /// Promises with the slice executors -- to be set when the starting criteria
   /// is met
-  std::vector<hpx::lcos::local::promise<executor_slice>> executor_slices;
+  std::vector<hpx::local::promise<executor_slice>> executor_slices;
   /// List of aggregated function calls - function will be launched when all
   /// slices have called it
   std::deque<aggregated_function_call<Executor>> function_calls;
@@ -715,8 +715,8 @@ template <typename Executor> class aggregated_executor {
   //===============================================================================
   // Public Interface
 public:
-  hpx::lcos::future<void> current_continuation;
-  hpx::lcos::future<void> last_stream_launch_done;
+  hpx::future<void> current_continuation;
+  hpx::future<void> last_stream_launch_done;
   std::atomic<size_t> overall_launch_counter = 0;
 
   /// Only meant to be accessed by the slice executors
@@ -764,7 +764,7 @@ template <typename Executor> class aggregated_executor {
 
   /// Only meant to be accessed by the slice executors
   template <typename F, typename... Ts>
-  hpx::lcos::future<void> async(const size_t slice_launch_counter, F &&f,
+  hpx::future<void> async(const size_t slice_launch_counter, F &&f,
                                 Ts &&...ts) {
     std::lock_guard<aggregation_mutex_t> guard(mut);
     assert(slices_exhausted == true);
@@ -785,7 +785,7 @@ template <typename Executor> class aggregated_executor {
   }
   /// Only meant to be accessed by the slice executors
   template <typename F, typename... Ts>
-  hpx::lcos::shared_future<void> wrap_async(const size_t slice_launch_counter, F &&f,
+  hpx::shared_future<void> wrap_async(const size_t slice_launch_counter, F &&f,
                                 Ts &&...ts) {
     std::lock_guard<aggregation_mutex_t> guard(mut);
     assert(slices_exhausted == true);
@@ -810,7 +810,7 @@ template <typename Executor> class aggregated_executor {
     return !slices_exhausted;
   }
 
-  std::optional<hpx::lcos::future<executor_slice>> request_executor_slice() {
+  std::optional<hpx::future<executor_slice>> request_executor_slice() {
     std::lock_guard<aggregation_mutex_t> guard(mut);
     if (!slices_exhausted) {
       const size_t local_slice_id = ++current_slices;
@@ -839,14 +839,14 @@ template <typename Executor> class aggregated_executor {
         dealloc_counter = 0;
 
         if (mode == aggregated_executor_modes::STRICT ) {
-          slices_full_promise = hpx::lcos::local::promise<void>{};
+          slices_full_promise = hpx::local::promise<void>{};
         }
       }
 
       // Create Executor Slice future -- that will be returned later
-      hpx::lcos::future<executor_slice> ret_fut;
+      hpx::future<executor_slice> ret_fut;
       if (local_slice_id < max_slices) {
-        executor_slices.emplace_back(hpx::lcos::local::promise<executor_slice>{});
+        executor_slices.emplace_back(hpx::local::promise<executor_slice>{});
         ret_fut =
             executor_slices[local_slice_id - 1].get_future();
       } else {
@@ -871,7 +871,7 @@ template <typename Executor> class aggregated_executor {
             gpu_id));
         // Renew promise that all slices will be ready as the primary launch
         // criteria...
-        hpx::lcos::shared_future<void> fut;
+        hpx::shared_future<void> fut;
         if (mode == aggregated_executor_modes::EAGER ||
             mode == aggregated_executor_modes::ENDLESS) {
           // Fallback launch condidtion: Launch as soon as the underlying stream
@@ -922,7 +922,7 @@ template <typename Executor> class aggregated_executor {
       return ret_fut;
     } else {
       // Return empty optional as failure
-      return std::optional<hpx::lcos::future<executor_slice>>{};
+      return std::optional<hpx::future<executor_slice>>{};
     }
   }
   size_t launched_slices;

tests/work_aggregation_cpu_triad.cpp

Lines changed: 6 additions & 6 deletions
@@ -36,7 +36,7 @@ void triad_kernel(float_t *A, const float_t *B, const float_t *C, const float_t
 /// production use!
 struct Dummy_Executor {
   /// Executor is always ready
-  hpx::lcos::future<void> get_future() {
+  hpx::future<void> get_future() {
     // To trigger interruption in exeuctor coalesing manually with the promise
     // For a proper CUDA executor we would get a future that's ready once the
     // stream is ready of course!
@@ -48,7 +48,7 @@ struct Dummy_Executor {
   }
   /// async -- executores immediately and returns ready future
   template <typename F, typename... Ts>
-  hpx::lcos::future<void> async(F &&f, Ts &&...ts) {
+  hpx::future<void> async(F &&f, Ts &&...ts) {
     f(std::forward<Ts>(ts)...);
     return hpx::make_ready_future();
   }
@@ -213,7 +213,7 @@ int hpx_main(int argc, char *argv[]) {
   const float_t scalar = 3.0;
 
   size_t number_tasks = problem_size / kernel_size;
-  std::vector<hpx::lcos::future<void>> futs;
+  std::vector<hpx::future<void>> futs;
 
   for (size_t task_id = 0; task_id < number_tasks; task_id++) {
     // Concurrency Wrapper: Splits stream benchmark into #number_tasks tasks
@@ -222,7 +222,7 @@ int hpx_main(int argc, char *argv[]) {
       if (slice_fut1.has_value()) {
         // Work aggregation Wrapper: Recombines (some) tasks, depending on the
         // number of slices
-        hpx::lcos::future<void> current_fut =
+        hpx::future<void> current_fut =
             slice_fut1.value().then([&, task_id](auto &&fut) {
               auto slice_exec = fut.get();
 
@@ -258,11 +258,11 @@ int hpx_main(int argc, char *argv[]) {
         return current_fut;
       } else {
        hpx::cout << "ERROR: Executor was not properly initialized!" << std::endl;
-        return hpx::lcos::make_ready_future();
+        return hpx::make_ready_future();
      }
     }));
   }
-  auto final_fut = hpx::lcos::when_all(futs);
+  auto final_fut = hpx::when_all(futs);
   final_fut.get();
 
   bool results_correct = true;

tests/work_aggregation_cuda_triad.cpp

Lines changed: 8 additions & 8 deletions
@@ -183,7 +183,7 @@ int hpx_main(int argc, char *argv[]) {
 
 
   size_t number_tasks = problem_size / kernel_size;
-  std::vector<hpx::lcos::future<void>> futs;
+  std::vector<hpx::future<void>> futs;
   cudaError_t(*func)(void*,const void*,size_t,cudaMemcpyKind,cudaStream_t) = cudaMemcpyAsync;
 
 
@@ -196,7 +196,7 @@ int hpx_main(int argc, char *argv[]) {
       if (slice_fut1.has_value()) {
        // Work aggregation Wrapper: Recombines (some) tasks, depending on the
        // number of slices
-        hpx::lcos::future<void> current_fut =
+        hpx::future<void> current_fut =
            slice_fut1.value().then([&, task_id](auto &&fut) {
              auto slice_exec = fut.get();
 
@@ -281,11 +281,11 @@ int hpx_main(int argc, char *argv[]) {
        return current_fut;
      } else {
        hpx::cout << "ERROR: Executor was not properly initialized!" << std::endl;
-        return hpx::lcos::make_ready_future();
+        return hpx::make_ready_future();
      }
    }));
   }
-  auto final_fut = hpx::lcos::when_all(futs);
+  auto final_fut = hpx::when_all(futs);
   final_fut.get();
   std::chrono::steady_clock::time_point end =
       std::chrono::steady_clock::now();
@@ -353,7 +353,7 @@ int hpx_main(int argc, char *argv[]) {
   for (size_t repetition = 0; repetition < repetitions; repetition++) {
 
     size_t number_tasks = problem_size / kernel_size;
-    std::vector<hpx::lcos::future<void>> futs;
+    std::vector<hpx::future<void>> futs;
     cudaError_t(*func)(void*,const void*,size_t,cudaMemcpyKind,cudaStream_t) = cudaMemcpyAsync;
 
 
@@ -366,7 +366,7 @@ int hpx_main(int argc, char *argv[]) {
       if (slice_fut1.has_value()) {
        // Work aggregation Wrapper: Recombines (some) tasks, depending on the
        // number of slices
-        hpx::lcos::future<void> current_fut =
+        hpx::future<void> current_fut =
            slice_fut1.value().then([&, task_id](auto &&fut) {
              auto slice_exec = fut.get();
 
@@ -396,11 +396,11 @@ int hpx_main(int argc, char *argv[]) {
        return hpx::make_ready_future();
      } else {
        hpx::cout << "ERROR: Executor was not properly initialized!" << std::endl;
-        return hpx::lcos::make_ready_future();
+        return hpx::make_ready_future();
      }
    }));
   }
-  auto final_fut = hpx::lcos::when_all(futs);
+  auto final_fut = hpx::when_all(futs);
   final_fut.get();
   std::chrono::steady_clock::time_point end =
       std::chrono::steady_clock::now();
