refactor <cuda/std/cstring>
davebayer committed Jan 22, 2025
1 parent 6a0f48b commit 3793a2d
Showing 481 changed files with 11,206 additions and 5,285 deletions.
1 change: 1 addition & 0 deletions .clangd
@@ -51,6 +51,7 @@ CompileFlags:
# strip CUDA flags unknown to clang
- "-ccbin*"
- "--compiler-options*"
- "--extended-lambda"
- "--expt-extended-lambda"
- "--expt-relaxed-constexpr"
- "-forward-unknown-to-host-compiler"
33 changes: 21 additions & 12 deletions .github/CODEOWNERS
@@ -1,20 +1,29 @@
# general codeowners for all files
# (Order matters. This needs to be at the top)
* @nvidia/cccl-codeowners

# Libraries
thrust/ @nvidia/cccl-thrust-codeowners @nvidia/cccl-codeowners
cub/ @nvidia/cccl-cub-codeowners @nvidia/cccl-codeowners
libcudacxx/ @nvidia/cccl-libcudacxx-codeowners @nvidia/cccl-codeowners
thrust/ @nvidia/cccl-thrust-codeowners
cub/ @nvidia/cccl-cub-codeowners
libcudacxx/ @nvidia/cccl-libcudacxx-codeowners
cudax/ @nvidia/cccl-cudax-codeowners
c/ @nvidia/cccl-c-codeowners
python/ @nvidia/cccl-python-codeowners

# Infrastructure
.github/ @nvidia/cccl-infra-codeowners @nvidia/cccl-codeowners
ci/ @nvidia/cccl-infra-codeowners @nvidia/cccl-codeowners
.devcontainer/ @nvidia/cccl-infra-codeowners @nvidia/cccl-codeowners
.github/ @nvidia/cccl-infra-codeowners
ci/ @nvidia/cccl-infra-codeowners
.devcontainer/ @nvidia/cccl-infra-codeowners
.pre-commit-config.yaml @nvidia/cccl-infra-codeowners
.clang-format @nvidia/cccl-infra-codeowners
.clangd @nvidia/cccl-infra-codeowners
c2h/ @nvidia/cccl-infra-codeowners
.vscode @nvidia/cccl-infra-codeowners

# cmake
**/CMakeLists.txt @nvidia/cccl-cmake-codeowners @nvidia/cccl-codeowners
**/cmake/ @nvidia/cccl-cmake-codeowners @nvidia/cccl-codeowners
**/CMakeLists.txt @nvidia/cccl-cmake-codeowners
**/cmake/ @nvidia/cccl-cmake-codeowners

# benchmarks
benchmarks/ @nvidia/cccl-benchmark-codeowners
**/benchmarks @nvidia/cccl-benchmark-codeowners

# docs
docs/ @nvidia/cccl-docs-codeowners
examples/ @nvidia/cccl-docs-codeowners
2 changes: 1 addition & 1 deletion .github/actions/docs-build/action.yml
@@ -54,4 +54,4 @@ runs:
# Upload docs as pages artifacts
- name: Upload artifact
if: ${{ inputs.upload_pages_artifact == 'true' }}
uses: actions/upload-pages-artifact@v2
uses: actions/upload-pages-artifact@v3
2 changes: 1 addition & 1 deletion .github/workflows/build-docs.yml
@@ -45,4 +45,4 @@ jobs:
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v2
uses: actions/deploy-pages@v4
6 changes: 6 additions & 0 deletions .github/workflows/build-rapids.yml
@@ -134,6 +134,12 @@ jobs:
sccache --show-adv-stats
done
done
# Exit with error if any failures occurred
if test ${#failures[@]} -ne 0; then
exit 1
fi
EOF
chmod +x "$RUNNER_TEMP"/ci{,-entrypoint}.sh
11 changes: 11 additions & 0 deletions .pre-commit-config.yaml
@@ -43,6 +43,17 @@ repos:
hooks:
- id: ruff # linter
- id: ruff-format # formatter

# TOML lint & format
- repo: https://github.com/ComPWA/taplo-pre-commit
rev: v0.9.3
hooks:
# See https://github.com/NVIDIA/cccl/issues/3426
# - id: taplo-lint
# exclude: "^docs/"
- id: taplo-format
exclude: "^docs/"

- repo: https://github.com/codespell-project/codespell
rev: v2.3.0
hooks:
106 changes: 0 additions & 106 deletions CMakePresets.json
@@ -73,8 +73,6 @@
"CUB_ENABLE_DIALECT_CPP20": true,
"THRUST_ENABLE_MULTICONFIG": true,
"THRUST_MULTICONFIG_WORKLOAD": "LARGE",
"THRUST_MULTICONFIG_ENABLE_DIALECT_CPP11": true,
"THRUST_MULTICONFIG_ENABLE_DIALECT_CPP14": true,
"THRUST_MULTICONFIG_ENABLE_DIALECT_CPP17": true,
"THRUST_MULTICONFIG_ENABLE_DIALECT_CPP20": true,
"THRUST_MULTICONFIG_ENABLE_SYSTEM_CPP": true,
@@ -128,28 +126,6 @@
"LIBCUDACXX_ENABLE_LIBCUDACXX_TESTS": true
}
},
{
"name": "libcudacxx-cpp11",
"displayName": "libcu++: C++11",
"inherits": "libcudacxx-base",
"cacheVariables": {
"CMAKE_CXX_STANDARD": "11",
"CMAKE_CUDA_STANDARD": "11",
"LIBCUDACXX_TEST_STANDARD_VER": "c++11",
"CCCL_IGNORE_DEPRECATED_CPP_11": true
}
},
{
"name": "libcudacxx-cpp14",
"displayName": "libcu++: C++14",
"inherits": "libcudacxx-base",
"cacheVariables": {
"CMAKE_CXX_STANDARD": "14",
"CMAKE_CUDA_STANDARD": "14",
"LIBCUDACXX_TEST_STANDARD_VER": "c++14",
"CCCL_IGNORE_DEPRECATED_CPP_14": true
}
},
{
"name": "libcudacxx-cpp17",
"displayName": "libcu++: C++17",
@@ -179,28 +155,6 @@
"CMAKE_CUDA_ARCHITECTURES": "70"
}
},
{
"name": "libcudacxx-nvrtc-cpp11",
"displayName": "libcu++ NVRTC: C++11",
"inherits": "libcudacxx-nvrtc-base",
"cacheVariables": {
"CMAKE_CXX_STANDARD": "11",
"CMAKE_CUDA_STANDARD": "11",
"LIBCUDACXX_TEST_STANDARD_VER": "c++11",
"CCCL_IGNORE_DEPRECATED_CPP_11": true
}
},
{
"name": "libcudacxx-nvrtc-cpp14",
"displayName": "libcu++ NVRTC: C++14",
"inherits": "libcudacxx-nvrtc-base",
"cacheVariables": {
"CMAKE_CXX_STANDARD": "14",
"CMAKE_CUDA_STANDARD": "14",
"LIBCUDACXX_TEST_STANDARD_VER": "c++14",
"CCCL_IGNORE_DEPRECATED_CPP_14": true
}
},
{
"name": "libcudacxx-nvrtc-cpp17",
"displayName": "libcu++ NVRTC: C++17",
@@ -261,8 +215,6 @@
"THRUST_MULTICONFIG_ENABLE_SYSTEM_CUDA": true,
"THRUST_MULTICONFIG_ENABLE_SYSTEM_OMP": true,
"THRUST_MULTICONFIG_ENABLE_SYSTEM_TBB": true,
"THRUST_MULTICONFIG_ENABLE_DIALECT_CPP11": false,
"THRUST_MULTICONFIG_ENABLE_DIALECT_CPP14": false,
"THRUST_MULTICONFIG_ENABLE_DIALECT_CPP17": false,
"THRUST_MULTICONFIG_ENABLE_DIALECT_CPP20": false
}
@@ -420,22 +372,6 @@
"libcudacxx.test.atomics.ptx"
]
},
{
"name": "libcudacxx-nvrtc-cpp11",
"hidden": false,
"inherits": [
"libcudacxx-nvrtcc"
],
"configurePreset": "libcudacxx-nvrtc-cpp11"
},
{
"name": "libcudacxx-nvrtc-cpp14",
"hidden": false,
"inherits": [
"libcudacxx-nvrtcc"
],
"configurePreset": "libcudacxx-nvrtc-cpp14"
},
{
"name": "libcudacxx-nvrtc-cpp17",
"hidden": false,
@@ -452,20 +388,6 @@
],
"configurePreset": "libcudacxx-nvrtc-cpp20"
},
{
"name": "libcudacxx-cpp11",
"configurePreset": "libcudacxx-cpp11",
"inherits": [
"libcudacxx-base"
]
},
{
"name": "libcudacxx-cpp14",
"configurePreset": "libcudacxx-cpp14",
"inherits": [
"libcudacxx-base"
]
},
{
"name": "libcudacxx-cpp17",
"configurePreset": "libcudacxx-cpp17",
@@ -572,20 +494,6 @@
"outputOnFailure": false
}
},
{
"name": "libcudacxx-lit-cpp11",
"configurePreset": "libcudacxx-cpp11",
"inherits": [
"libcudacxx-lit-base"
]
},
{
"name": "libcudacxx-lit-cpp14",
"configurePreset": "libcudacxx-cpp14",
"inherits": [
"libcudacxx-lit-base"
]
},
{
"name": "libcudacxx-lit-cpp17",
"configurePreset": "libcudacxx-cpp17",
@@ -607,20 +515,6 @@
"libcudacxx-lit-base"
]
},
{
"name": "libcudacxx-nvrtc-cpp11",
"configurePreset": "libcudacxx-nvrtc-cpp11",
"inherits": [
"libcudacxx-nvrtc-base"
]
},
{
"name": "libcudacxx-nvrtc-cpp14",
"configurePreset": "libcudacxx-nvrtc-cpp14",
"inherits": [
"libcudacxx-nvrtc-base"
]
},
{
"name": "libcudacxx-nvrtc-cpp17",
"configurePreset": "libcudacxx-nvrtc-cpp17",
4 changes: 2 additions & 2 deletions c/parallel/src/reduce.cu
@@ -160,7 +160,7 @@ std::string get_single_tile_kernel_name(
check(nvrtcGetTypeName<op_wrapper>(&reduction_op_t));

return std::format(
"cub::DeviceReduceSingleTileKernel<{0}, {1}, {2}, {3}, {4}, {5}, {6}>",
"cub::detail::reduce::DeviceReduceSingleTileKernel<{0}, {1}, {2}, {3}, {4}, {5}, {6}>",
chained_policy_t,
input_iterator_t,
output_iterator_t,
@@ -192,7 +192,7 @@ std::string get_device_reduce_kernel_name(cccl_op_t op, cccl_iterator_t input_it
check(nvrtcGetTypeName<cuda::std::__identity>(&transform_op_t));

return std::format(
"cub::DeviceReduceKernel<{0}, {1}, {2}, {3}, {4}, {5}>",
"cub::detail::reduce::DeviceReduceKernel<{0}, {1}, {2}, {3}, {4}, {5}>",
chained_policy_t,
input_iterator_t,
offset_t,
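For context, here is a minimal sketch (not part of this commit) of how these kernel-name strings are assembled: nvrtcGetTypeName<T>() from <nvrtc.h> recovers a readable name for each template argument, and std::format splices the names into the now fully qualified cub::detail::reduce:: kernel template; the resulting string can then be handed to NVRTC as a name expression. The helper name and the elided arguments below are illustrative.

// Minimal sketch, assuming C++20 <format> and the nvrtcGetTypeName<T>() helper
// declared in <nvrtc.h>; error checking and most template arguments are omitted.
#include <format>
#include <string>

#include <nvrtc.h>

template <typename ChainedPolicyT, typename InputIteratorT>
std::string make_single_tile_kernel_name()
{
  std::string chained_policy_t;
  std::string input_iterator_t;
  nvrtcGetTypeName<ChainedPolicyT>(&chained_policy_t); // return value ignored in this sketch
  nvrtcGetTypeName<InputIteratorT>(&input_iterator_t);

  // Remaining template arguments are elided ("...") for brevity.
  return std::format(
    "cub::detail::reduce::DeviceReduceSingleTileKernel<{0}, {1}, ...>", chained_policy_t, input_iterator_t);
}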
5 changes: 2 additions & 3 deletions c/parallel/test/test_main.cpp
@@ -12,8 +12,7 @@

#include <iostream>

#define CATCH_CONFIG_RUNNER
#include <catch2/catch.hpp>
#include <catch2/catch_session.hpp>

int device_guard(int device_id)
{
@@ -40,7 +39,7 @@ int main(int argc, char* argv[])
int device_id{};

// Build a new parser on top of Catch's
using namespace Catch::clara;
using namespace Catch::Clara;
auto cli = session.cli() | Opt(device_id, "device")["-d"]["--device"]("device id to use");
session.cli(cli);

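For context, a minimal sketch (not part of this commit) of the Catch2 v3 pattern the updated test_main.cpp follows: the monolithic <catch2/catch.hpp> plus CATCH_CONFIG_RUNNER is replaced by <catch2/catch_session.hpp>, and custom CLI options are added through Catch::Clara (capitalized in v3).

// Minimal sketch of a Catch2 v3 custom main with an extra CLI option;
// the option name and variable are illustrative.
#include <catch2/catch_session.hpp>

int main(int argc, char* argv[])
{
  Catch::Session session;

  int device_id{};

  // Extend Catch's command line parser with a --device option.
  using namespace Catch::Clara;
  auto cli = session.cli() | Opt(device_id, "device")["-d"]["--device"]("device id to use");
  session.cli(cli);

  // Let Catch parse the command line; bail out on parse errors.
  if (const int ret = session.applyCommandLine(argc, argv); ret != 0)
  {
    return ret;
  }

  // device_id would be used here to select a GPU before running the tests.
  return session.run();
}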
4 changes: 3 additions & 1 deletion c/parallel/test/test_util.h
@@ -22,7 +22,9 @@
#include <type_traits>
#include <vector>

#include <catch2/catch.hpp>
#include <catch2/catch_template_test_macros.hpp>
#include <catch2/catch_test_macros.hpp>
#include <catch2/generators/catch_generators_all.hpp>
#include <cccl/c/reduce.h>
#include <nvrtc.h>

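For context, a minimal sketch (not part of this commit) showing the granular Catch2 v3 headers used above in place of the monolithic <catch2/catch.hpp>; the test itself is illustrative.

// TEMPLATE_TEST_CASE comes from catch_template_test_macros.hpp,
// GENERATE from the generators header, REQUIRE from catch_test_macros.hpp.
#include <catch2/catch_template_test_macros.hpp>
#include <catch2/catch_test_macros.hpp>
#include <catch2/generators/catch_generators_all.hpp>

TEMPLATE_TEST_CASE("widths are positive", "[sketch]", int, long, double)
{
  const int n = GENERATE(1, 2, 4, 8);
  REQUIRE(n * static_cast<int>(sizeof(TestType)) > 0);
}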
12 changes: 4 additions & 8 deletions c2h/include/c2h/catch2_main.h
@@ -36,13 +36,9 @@
//! executable, this header is included into each test. On the other hand, when all the tests are compiled into a single
//! executable, this header is excluded from the tests and included into catch2_runner.cpp

#ifdef CUB_CONFIG_MAIN
# define CATCH_CONFIG_RUNNER
#endif

#include <catch2/catch.hpp>
#include <catch2/catch_session.hpp>

#if defined(CUB_CONFIG_MAIN)
#ifdef CUB_CONFIG_MAIN
# if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
# include <c2h/catch2_runner_helper.h>

@@ -59,7 +55,7 @@ int main(int argc, char* argv[])
int device_id{};

// Build a new parser on top of Catch's
using namespace Catch::clara;
using namespace Catch::Clara;
auto cli = session.cli() | Opt(device_id, "device")["-d"]["--device"]("device id to use");
session.cli(cli);

@@ -73,4 +69,4 @@
# endif // THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
return session.run(argc, argv);
}
#endif
#endif // CUB_CONFIG_MAIN