Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions infini_train/src/core/cuda/cuda_guard_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,10 @@ void CudaGuardImpl::SynchronizeDevice(Device device) const {
SetDevice(original_device);
}

// Blocks the calling host thread until all work queued on `stream` has
// completed (wraps cudaStreamSynchronize for the CUDA backend).
//
// `stream` must be a CudaStream created by this backend. dynamic_cast yields
// nullptr for any other Stream subtype; dereferencing that would be undefined
// behavior, so surface the misuse through CUDA_CHECK as an invalid-value error
// instead of crashing.
void CudaGuardImpl::SynchronizeStream(Stream *stream) const {
    auto *cuda_stream = dynamic_cast<CudaStream *>(stream);
    CUDA_CHECK(cuda_stream != nullptr ? cudaStreamSynchronize(cuda_stream->cuda_stream())
                                      : cudaErrorInvalidValue);
}

// blas
BlasHandle *CudaGuardImpl::GetBlasHandle(Device device) const {
CheckCudaDevice(device);
Expand Down
2 changes: 2 additions & 0 deletions infini_train/src/core/cuda/cuda_guard_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,8 @@ class CudaGuardImpl final : public DeviceGuardImpl {
// sync
void SynchronizeDevice(Device device) const override;

void SynchronizeStream(Stream *) const override;

// blas
BlasHandle *GetBlasHandle(Device device) const override;

Expand Down
8 changes: 8 additions & 0 deletions infini_train/src/kernels/cuda/concat.cu
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,10 @@ std::shared_ptr<Tensor> ConcatForward(const std::vector<std::shared_ptr<Tensor>>
CUDA_CHECK(cudaMallocAsync(&device_offsets, sizeof(int64_t) * (num_inputs + 1), stream));
CUDA_CHECK(cudaMemcpyAsync(device_offsets, host_offsets.data(), sizeof(int64_t) * (num_inputs + 1),
cudaMemcpyHostToDevice, stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

ConcatForwardKernel<T><<<num_blocks, threads_per_block, 0, stream>>>(
device_input_ptrs, static_cast<T *>(output->DataPtr()), device_offsets, N, D, num_inputs, K_total);
Expand Down Expand Up @@ -225,6 +229,10 @@ std::vector<std::shared_ptr<Tensor>> ConcatBackward(const std::shared_ptr<Tensor
CUDA_CHECK(cudaMallocAsync(&device_offsets, sizeof(int64_t) * (num_inputs + 1), stream));
CUDA_CHECK(cudaMemcpyAsync(device_offsets, host_offsets.data(), sizeof(int64_t) * (num_inputs + 1),
cudaMemcpyHostToDevice, stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

ConcatBackwardKernel<T><<<num_blocks, threads_per_block, 0, stream>>>(
static_cast<const T *>(grad_output->DataPtr()), device_ptrs, device_offsets, N, D, num_inputs, K_total);
Expand Down
23 changes: 16 additions & 7 deletions infini_train/src/kernels/cuda/elementwise.cu
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ void LaunchForward(Func func, const std::shared_ptr<Tensor> &output, const Input
auto out_stride_host = ComputeStrides(out_shape);

int64_t *device_buffer;
cudaMallocAsync(&device_buffer, 5 * ndim * sizeof(int64_t), cuda_stream);
CUDA_CHECK(cudaMallocAsync(&device_buffer, 5 * ndim * sizeof(int64_t), cuda_stream));

int64_t *device_a_strides, *device_b_strides, *device_out_strides, *device_a_shape, *device_b_shape;
device_a_strides = device_buffer + ndim * 0;
Expand All @@ -123,8 +123,12 @@ void LaunchForward(Func func, const std::shared_ptr<Tensor> &output, const Input
host_buffer.insert(host_buffer.end(), a_shape.begin(), a_shape.end());
host_buffer.insert(host_buffer.end(), b_shape.begin(), b_shape.end());

cudaMemcpyAsync(device_buffer, host_buffer.data(), 5 * ndim * sizeof(int64_t), cudaMemcpyHostToDevice,
cuda_stream);
CUDA_CHECK(cudaMemcpyAsync(device_buffer, host_buffer.data(), 5 * ndim * sizeof(int64_t),
cudaMemcpyHostToDevice, cuda_stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(cuda_stream));

LaunchKernel<BLOCK_SIZE, T>(
[&](dim3 grid, dim3 block, size_t offset, const T *a_ptr, const T *b_ptr) {
Expand All @@ -134,7 +138,7 @@ void LaunchForward(Func func, const std::shared_ptr<Tensor> &output, const Input
},
output, inputs...);

cudaFreeAsync(device_buffer, cuda_stream);
CUDA_CHECK(cudaFreeAsync(device_buffer, cuda_stream));
} else {
static_assert(sizeof...(inputs) == 1 || sizeof...(inputs) == 2,
"LaunchForward currently only supports unary and binary operations.");
Expand Down Expand Up @@ -538,7 +542,7 @@ void LaunchBackward(FuncA fun_a, FuncB fun_b, const std::shared_ptr<Tensor> &out
auto out_stride_host = ComputeStrides(out_shape);

int64_t *device_buffer;
cudaMallocAsync(&device_buffer, 5 * ndim * sizeof(int64_t), stream);
CUDA_CHECK(cudaMallocAsync(&device_buffer, 5 * ndim * sizeof(int64_t), stream));

int64_t *device_a_strides, *device_b_strides, *device_out_strides, *device_a_shape, *device_b_shape;
device_a_strides = device_buffer + ndim * 0;
Expand All @@ -554,7 +558,12 @@ void LaunchBackward(FuncA fun_a, FuncB fun_b, const std::shared_ptr<Tensor> &out
host_buffer.insert(host_buffer.end(), a_shape.begin(), a_shape.end());
host_buffer.insert(host_buffer.end(), b_shape.begin(), b_shape.end());

cudaMemcpyAsync(device_buffer, host_buffer.data(), 5 * ndim * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
CUDA_CHECK(
cudaMemcpyAsync(device_buffer, host_buffer.data(), 5 * ndim * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

const size_t num_elements = grad_output->NumElements();

Expand Down Expand Up @@ -616,7 +625,7 @@ void LaunchBackward(FuncA fun_a, FuncB fun_b, const std::shared_ptr<Tensor> &out
},
output_a, inputs...);
}
cudaFreeAsync(device_buffer, stream);
CUDA_CHECK(cudaFreeAsync(device_buffer, stream));
}

template <typename Func> std::shared_ptr<Tensor> UnaryForward(const std::shared_ptr<Tensor> &input, Func unary_fn) {
Expand Down
8 changes: 8 additions & 0 deletions infini_train/src/kernels/cuda/gather.cu
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,10 @@ std::shared_ptr<Tensor> IndexGatherForward(const std::shared_ptr<Tensor> &input,
cudaMemcpyAsync(in_strides_dev, in_strides.data(), num_dims * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(out_strides_dev, out_strides.data(), num_dims * sizeof(int64_t), cudaMemcpyHostToDevice,
stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

const int threads = 256;
const int blocks = (total_elements + threads - 1) / threads;
Expand Down Expand Up @@ -203,6 +207,10 @@ std::shared_ptr<Tensor> IndexGatherBackward(const std::shared_ptr<Tensor> &grad_
cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(out_strides_dev, out_strides.data(), n_out_strides * sizeof(int64_t),
cudaMemcpyHostToDevice, stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

const int threads = 256;
const int blocks = (int)((total_elements + threads - 1) / threads);
Expand Down
56 changes: 35 additions & 21 deletions infini_train/src/kernels/cuda/slice.cu
Original file line number Diff line number Diff line change
Expand Up @@ -73,21 +73,28 @@ std::shared_ptr<Tensor> SliceForward(const std::shared_ptr<Tensor> &input, const
infini_train::core::GetDeviceGuardImpl(device.type())->GetStream(device))
->cuda_stream();

cudaMallocAsync(&new_dims_dev,
(ends.size() + starts.size() + steps.size() + dims.size() + new_dims.size()) * sizeof(int64_t),
stream);
CUDA_CHECK(cudaMallocAsync(
&new_dims_dev, (ends.size() + starts.size() + steps.size() + dims.size() + new_dims.size()) * sizeof(int64_t),
stream));
starts_dev = new_dims_dev + ends.size();
steps_dev = starts_dev + starts.size();
input_strides_dev = steps_dev + steps.size();
output_strides_dev = input_strides_dev + dims.size();

cudaMemcpyAsync(new_dims_dev, new_dims.data(), ends.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(starts_dev, starts.data(), starts.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(steps_dev, steps.data(), steps.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(input_strides_dev, src_strides.data(), dims.size() * sizeof(int64_t), cudaMemcpyHostToDevice,
stream);
cudaMemcpyAsync(output_strides_dev, dst_strides.data(), new_dims.size() * sizeof(int64_t), cudaMemcpyHostToDevice,
stream);
CUDA_CHECK(
cudaMemcpyAsync(new_dims_dev, new_dims.data(), ends.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(
cudaMemcpyAsync(starts_dev, starts.data(), starts.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(
cudaMemcpyAsync(steps_dev, steps.data(), steps.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(input_strides_dev, src_strides.data(), dims.size() * sizeof(int64_t),
cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(output_strides_dev, dst_strides.data(), new_dims.size() * sizeof(int64_t),
cudaMemcpyHostToDevice, stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

int threads_per_block = 256;
int num_blocks = (total_elements + threads_per_block - 1) / threads_per_block;
Expand Down Expand Up @@ -167,21 +174,28 @@ std::shared_ptr<Tensor> SliceBackward(const std::shared_ptr<Tensor> &grad_output
const auto &stream = dynamic_cast<infini_train::core::cuda::CudaStream *>(
infini_train::core::GetDeviceGuardImpl(device.type())->GetStream(device))
->cuda_stream();
cudaMallocAsync(&new_dims_dev,
(ends.size() + starts.size() + steps.size() + dims.size() + new_dims.size()) * sizeof(int64_t),
stream);
CUDA_CHECK(cudaMallocAsync(
&new_dims_dev, (ends.size() + starts.size() + steps.size() + dims.size() + new_dims.size()) * sizeof(int64_t),
stream));
starts_dev = new_dims_dev + ends.size();
steps_dev = starts_dev + starts.size();
input_strides_dev = steps_dev + steps.size();
output_strides_dev = input_strides_dev + dims.size();

cudaMemcpyAsync(new_dims_dev, new_dims.data(), ends.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(starts_dev, starts.data(), starts.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(steps_dev, steps.data(), steps.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(input_strides_dev, src_strides.data(), dims.size() * sizeof(int64_t), cudaMemcpyHostToDevice,
stream);
cudaMemcpyAsync(output_strides_dev, dst_strides.data(), new_dims.size() * sizeof(int64_t), cudaMemcpyHostToDevice,
stream);
CUDA_CHECK(
cudaMemcpyAsync(new_dims_dev, new_dims.data(), ends.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(
cudaMemcpyAsync(starts_dev, starts.data(), starts.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(
cudaMemcpyAsync(steps_dev, steps.data(), steps.size() * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(input_strides_dev, src_strides.data(), dims.size() * sizeof(int64_t),
cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(output_strides_dev, dst_strides.data(), new_dims.size() * sizeof(int64_t),
cudaMemcpyHostToDevice, stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

int threads_per_block = 256;
int num_blocks = (total_elements + threads_per_block - 1) / threads_per_block;
Expand All @@ -195,7 +209,7 @@ std::shared_ptr<Tensor> SliceBackward(const std::shared_ptr<Tensor> &grad_output
},
"CUDA SliceBackward");

cudaFreeAsync(new_dims_dev, stream);
CUDA_CHECK(cudaFreeAsync(new_dims_dev, stream));

return grad_input;
}
Expand Down
16 changes: 11 additions & 5 deletions infini_train/src/kernels/cuda/split.cu
Original file line number Diff line number Diff line change
Expand Up @@ -133,18 +133,24 @@ std::shared_ptr<Tensor> LaunchSplitBackward(const std::vector<int64_t> &input_di
void *device_ptr;
const T **device_grad_output_ptrs;
int64_t *device_H_outs;
cudaMallocAsync(&device_ptr, (sizeof(T *) + sizeof(int64_t)) * num_splits, stream);
CUDA_CHECK(cudaMallocAsync(&device_ptr, (sizeof(T *) + sizeof(int64_t)) * num_splits, stream));
device_grad_output_ptrs = (const T **)(device_ptr);
device_H_outs = reinterpret_cast<int64_t *>(device_grad_output_ptrs + num_splits);

cudaMemcpyAsync(device_grad_output_ptrs, host_grad_output_ptrs.data(), sizeof(T *) * num_splits,
cudaMemcpyHostToDevice, stream);
CUDA_CHECK(cudaMemcpyAsync(device_grad_output_ptrs, host_grad_output_ptrs.data(), sizeof(T *) * num_splits,
cudaMemcpyHostToDevice, stream));

// init H_out for each split
std::vector<int64_t> H_outs(num_splits);
for (int i = 0; i < num_splits; ++i) { H_outs[i] = std::min(split_size, H_in - i * split_size); }

cudaMemcpyAsync(device_H_outs, H_outs.data(), sizeof(int64_t) * num_splits, cudaMemcpyHostToDevice, stream);
CUDA_CHECK(
cudaMemcpyAsync(device_H_outs, H_outs.data(), sizeof(int64_t) * num_splits, cudaMemcpyHostToDevice, stream));

// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

int64_t total_elements = N * H_in * W;
int threads_per_block = 256;
Expand All @@ -154,7 +160,7 @@ std::shared_ptr<Tensor> LaunchSplitBackward(const std::vector<int64_t> &input_di
static_cast<T *>(grad_input->DataPtr()), N, H_in,
W, split_size, num_splits, device_H_outs);

cudaFreeAsync(device_ptr, stream);
CUDA_CHECK(cudaFreeAsync(device_ptr, stream));

return grad_input;
}
Expand Down
23 changes: 16 additions & 7 deletions infini_train/src/kernels/cuda/stack.cu
Original file line number Diff line number Diff line change
Expand Up @@ -67,14 +67,18 @@ std::shared_ptr<Tensor> StackForward(const std::vector<std::shared_ptr<Tensor>>
for (const auto &t : inputs) { host_input_ptrs.push_back(static_cast<const T *>(t->DataPtr())); }

const T **device_input_ptrs;
cudaMallocAsync(&device_input_ptrs, sizeof(T *) * num_inputs, stream);
cudaMemcpyAsync(device_input_ptrs, host_input_ptrs.data(), sizeof(T *) * num_inputs, cudaMemcpyHostToDevice,
stream);
CUDA_CHECK(cudaMallocAsync(&device_input_ptrs, sizeof(T *) * num_inputs, stream));
CUDA_CHECK(cudaMemcpyAsync(device_input_ptrs, host_input_ptrs.data(), sizeof(T *) * num_inputs,
cudaMemcpyHostToDevice, stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

StackForwardKernel<<<num_blocks, threads_per_block, 0, stream>>>(
device_input_ptrs, static_cast<T *>(output->DataPtr()), N, D, num_inputs);

cudaFreeAsync(device_input_ptrs, stream);
CUDA_CHECK(cudaFreeAsync(device_input_ptrs, stream));
},
"CUDA StackForward");

Expand Down Expand Up @@ -136,13 +140,18 @@ std::vector<std::shared_ptr<Tensor>> StackBackward(const std::vector<int64_t> &i
for (auto &t : grads) { host_ptrs.push_back(static_cast<T *>(t->DataPtr())); }

T **device_ptrs;
cudaMallocAsync(&device_ptrs, sizeof(T *) * num_inputs, stream);
cudaMemcpyAsync(device_ptrs, host_ptrs.data(), sizeof(T *) * num_inputs, cudaMemcpyHostToDevice, stream);
CUDA_CHECK(cudaMallocAsync(&device_ptrs, sizeof(T *) * num_inputs, stream));
CUDA_CHECK(cudaMemcpyAsync(device_ptrs, host_ptrs.data(), sizeof(T *) * num_inputs, cudaMemcpyHostToDevice,
stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

StackBackwardKernel<<<num_blocks, threads_per_block, 0, stream>>>(
static_cast<const T *>(grad_output->DataPtr()), device_ptrs, N, D, num_inputs);

cudaFreeAsync(device_ptrs, stream);
CUDA_CHECK(cudaFreeAsync(device_ptrs, stream));
},
"CUDA StackBackward");

Expand Down
11 changes: 8 additions & 3 deletions infini_train/src/kernels/cuda/transform.cu
Original file line number Diff line number Diff line change
Expand Up @@ -252,7 +252,7 @@ std::shared_ptr<Tensor> TransposeForward(const std::shared_ptr<Tensor> &input, i
// Allocate device memory for dims and strides
// TODO(zbl): avoid using cudaMalloc?
int64_t *device_buffer;
cudaMallocAsync(&device_buffer, 3 * ndim * sizeof(int64_t), stream);
CUDA_CHECK(cudaMallocAsync(&device_buffer, 3 * ndim * sizeof(int64_t), stream));

int64_t *in_dims_dev = device_buffer;
int64_t *in_strides_dev = device_buffer + ndim;
Expand All @@ -263,7 +263,12 @@ std::shared_ptr<Tensor> TransposeForward(const std::shared_ptr<Tensor> &input, i
host_buffer.insert(host_buffer.end(), in_strides.begin(), in_strides.end());
host_buffer.insert(host_buffer.end(), out_strides.begin(), out_strides.end());

cudaMemcpyAsync(device_buffer, host_buffer.data(), 3 * ndim * sizeof(int64_t), cudaMemcpyHostToDevice, stream);
CUDA_CHECK(
cudaMemcpyAsync(device_buffer, host_buffer.data(), 3 * ndim * sizeof(int64_t), cudaMemcpyHostToDevice, stream));
// NOTE(dcj):
// Synchronize the stream here to ensure all preceding H2D/D2H memcpy
// operations have completed before the host buffers go out of scope.
CUDA_CHECK(cudaStreamSynchronize(stream));

int threads_per_block = 256;
int num_blocks = (num_elements + threads_per_block - 1) / threads_per_block;
Expand All @@ -278,7 +283,7 @@ std::shared_ptr<Tensor> TransposeForward(const std::shared_ptr<Tensor> &input, i
},
"CUDA TransposeForward");

cudaFreeAsync(device_buffer, stream);
CUDA_CHECK(cudaFreeAsync(device_buffer, stream));

return output;
}
Expand Down
Loading