-
Notifications
You must be signed in to change notification settings - Fork 98
Feat/conv 1 d #1907
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: conv
Are you sure you want to change the base?
Feat/conv 1 d #1907
Changes from 4 commits
a59f27c
c629e5b
630a24d
74009f8
83c52a8
f34972b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,3 @@ | ||
# Build the conv example and link it against the installed Ginkgo target.
add_executable(conv conv.cpp)
target_link_libraries(conv PRIVATE Ginkgo::ginkgo)
# Allow the example to include headers that live next to conv.cpp.
target_include_directories(conv PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,45 @@ | ||
// SPDX-FileCopyrightText: 2025 The Ginkgo authors | ||
// | ||
// SPDX-License-Identifier: BSD-3-Clause | ||
|
||
#include <iostream> | ||
#include <memory> | ||
#include <vector> | ||
|
||
#include <ginkgo/ginkgo.hpp> | ||
|
||
int main() | ||
{ | ||
using ValueType = double; | ||
using Vec = gko::matrix::Dense<ValueType>; | ||
|
||
// Executor setup | ||
auto exec = gko::ReferenceExecutor::create(); | ||
|
||
// Convolution kernel (length K) as a gko::array on the executor | ||
std::vector<ValueType> kernel_vals{1.0, 2.0, 3.0}; | ||
gko::array<ValueType> kernel_array(exec, kernel_vals.begin(), | ||
kernel_vals.end()); | ||
auto conv_op = gko::matrix::Conv<ValueType>::create(exec, kernel_array); | ||
|
||
// Input signal (length N) as a Dense vector | ||
auto input = gko::initialize<Vec>({4.0, 5.0, 6.0, 7.0}, exec); | ||
|
||
// Allocate output Dense vector: floor((N + 2*padding - K) / stride) + 1 | ||
// elements | ||
const gko::size_type output_length = | ||
(input->get_size()[0] + 2 * 2 - kernel_vals.size()) / 1 + 1; | ||
std::cout << "Output length: " << output_length << std::endl; | ||
auto output = Vec::create(exec, gko::dim<2>{output_length, 1}); | ||
output->fill(0.0); | ||
|
||
// Apply convolution: conv_op * input -> output | ||
conv_op->apply(gko::lend(input), gko::lend(output)); | ||
|
||
// Output the results | ||
std::cout << "Convolution result: "; | ||
for (gko::size_type i = 0; i < output_length; ++i) { | ||
std::cout << output->at(i, 0) << " "; | ||
} | ||
std::cout << std::endl; | ||
} |
Original file line number | Diff line number | Diff line change | ||||||||
---|---|---|---|---|---|---|---|---|---|---|
|
@@ -24,10 +24,34 @@ namespace conv { | |||||||||
|
||||||||||
template <typename ValueType> | ||||||||||
void conv(std::shared_ptr<const DefaultExecutor> exec, | ||||||||||
const matrix::Conv<ValueType>* kernel, | ||||||||||
const matrix::Dense<ValueType>* b, matrix::Dense<ValueType>* x) | ||||||||||
const array<ValueType>& kernel, const matrix::Dense<ValueType>* b, | ||||||||||
matrix::Dense<ValueType>* x) | ||||||||||
{ | ||||||||||
GKO_NOT_IMPLEMENTED; | ||||||||||
const auto b_size = b->get_size(); // (N, 1) | ||||||||||
const auto x_size = x->get_size(); // (N + K - 1, 1) | ||||||||||
const auto kernel_size = kernel.get_size(); // K | ||||||||||
const auto* kernel_ptr = kernel.get_const_data(); // pointer to kernel data | ||||||||||
int stride = 1; | ||||||||||
int padding = 2; | ||||||||||
int output_length = (x_size[0] + 2 * padding - kernel_size) / stride + 1; | ||||||||||
|
||||||||||
for (gko::size_type i = 0; i < x_size[0]; ++i) { | ||||||||||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. because you are using substraction later, so I will say you can use gko::int64 directly as Tobias mentioned There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. also I think start with output will be easier than with input |
||||||||||
ValueType sum = zero<ValueType>(); | ||||||||||
std::ptrdiff_t start = | ||||||||||
static_cast<std::ptrdiff_t>(i * stride) - padding; | ||||||||||
for (gko::size_type j = 0; j < kernel_size; ++j) { | ||||||||||
std::ptrdiff_t b_idx = | ||||||||||
start + | ||||||||||
static_cast<std::ptrdiff_t>( | ||||||||||
j); // calculate the index in b's row based on the current | ||||||||||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Suggested change
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We might not need the multplication with stride here since we already multiply with stride while calculating |
||||||||||
// position in x and the kernel's stride and padding | ||||||||||
if (b_idx >= 0 && b_idx < static_cast<std::ptrdiff_t>(b_size[0])) { | ||||||||||
sum += kernel_ptr[j] * b->at(static_cast<gko::size_type>(b_idx), | ||||||||||
0); // direct pointer access | ||||||||||
} | ||||||||||
} | ||||||||||
x->at(i, 0) = sum; | ||||||||||
} | ||||||||||
} | ||||||||||
|
||||||||||
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_CONV_KERNEL); | ||||||||||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
shouldn't padding be zero now?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Padding equals 2 in your example so that it reproduces the numpy result.
IMO, we should follow the default from torch, not numpy.
Also, if it needs to match numpy, it should be something like ((x-1)*stride + kernel - b)/2
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The example also matches the torch result. We can change the padding inside
conv_kernels.cpp
. Its result would be the same as this function: torch.nn.functional.conv1d(x, w, bias=None, stride=1, padding=2)
We can accept padding as an argument, but currently we don't do that, so I hard-coded it to be 2. The function also works for zero padding. For zero padding, it would be similar to:
torch.nn.functional.conv1d(x, w, bias=None, stride=1, padding=0)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I changed it to zero, but it will work if we change it to any other padding as well.