[clang-tidy] NO.15 enable cppcoreguidelines-pro-type-const-cast Part.1 (PaddlePaddle#58285)

* fix test

* fix test

* revert context_pool

* fix

* CI
enkilee authored and SecretXV committed Nov 28, 2023
1 parent 4f9ce8d commit d25848a
Showing 14 changed files with 90 additions and 75 deletions.
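
For context: cppcoreguidelines-pro-type-const-cast is the clang-tidy check for C++ Core Guidelines rule Type.3, and it flags uses of const_cast. In a .clang-tidy Checks list, a name prefixed with "-" is disabled, so the first hunk below enables the check by dropping that prefix; each cast that is kept on purpose then gets a trailing // NOLINT comment, which suppresses the warning on that line. A minimal standalone sketch of the diagnostic and its suppression (illustrative code, not from this repository):

int ReadThrough(const int& x) {
  // clang-tidy reports here: do not use const_cast
  // [cppcoreguidelines-pro-type-const-cast]
  int& flagged = const_cast<int&>(x);
  // The same cast with a trailing NOLINT comment is not reported.
  int& suppressed = const_cast<int&>(x);  // NOLINT
  return flagged + suppressed;
}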
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -158,7 +158,7 @@ cppcoreguidelines-explicit-virtual-functions,
cppcoreguidelines-init-variables,
cppcoreguidelines-narrowing-conversions,
cppcoreguidelines-no-malloc,
--cppcoreguidelines-pro-type-const-cast,
+cppcoreguidelines-pro-type-const-cast,
-cppcoreguidelines-pro-type-member-init,
-cppcoreguidelines-slicing,
-hicpp-avoid-goto,
4 changes: 2 additions & 2 deletions paddle/phi/api/lib/context_pool.cc
@@ -61,7 +61,7 @@ const phi::DeviceContext* DeviceContextPool::Get(const Place& place) {
}

phi::DeviceContext* DeviceContextPool::GetMutable(const Place& place) {
-return const_cast<phi::DeviceContext*>(Get(place));
+return const_cast<phi::DeviceContext*>(Get(place)); // NOLINT
}

} // namespace experimental
@@ -72,7 +72,7 @@ namespace paddle {
PADDLE_API phi::Allocator* GetAllocator(const phi::Place& place) {
const phi::DeviceContext* dev_ctx =
paddle::experimental::DeviceContextPool::Instance().Get(place);
-return const_cast<phi::Allocator*>(&dev_ctx->GetAllocator());
+return const_cast<phi::Allocator*>(&dev_ctx->GetAllocator()); // NOLINT
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
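
Both NOLINTs in this file mark the same idiom: the lookup is written once as a const member returning a pointer-to-const, and the mutable accessor reuses it through a const_cast, which is well-defined because the pool owns its objects as non-const. A simplified sketch of the shape (not the real classes):

struct Ctx { int value = 0; };

class Pool {
 public:
  const Ctx* Get() const { return &ctx_; }  // the single lookup path
  Ctx* GetMutable() {
    // ctx_ is stored non-const, so stripping const is well-defined;
    // the NOLINT records that the cast is deliberate.
    return const_cast<Ctx*>(Get());  // NOLINT
  }

 private:
  Ctx ctx_;
};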
2 changes: 1 addition & 1 deletion paddle/phi/backends/device_manager.cc
@@ -183,7 +183,7 @@ void Device::BlasAXPBY(const stream::Stream& stream,
phi::CppTypeToDataType<T>::Type(),
numel,
alpha,
-reinterpret_cast<void*>(const_cast<T*>(x)),
+reinterpret_cast<void*>(const_cast<T*>(x)), // NOLINT
beta,
reinterpret_cast<void*>(y));
}
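
This cast exists because the underlying device runtime takes a mutable void* even for read-only inputs, a common shape for C-style interfaces. Reduced to its essentials (c_axpy is a hypothetical function, used only for illustration):

// A C-style interface that takes non-const pointers, inputs included.
extern "C" void c_axpy(int n, float alpha, void* x, void* y);

void Axpy(int n, float alpha, const float* x, float* y) {
  // x stays logically read-only; the cast only satisfies the C signature.
  c_axpy(n, alpha,
         reinterpret_cast<void*>(const_cast<float*>(x)),  // NOLINT
         reinterpret_cast<void*>(y));
}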
8 changes: 4 additions & 4 deletions paddle/phi/backends/gpu/gpu_context.cc
@@ -919,17 +919,17 @@ ncclComm_t GPUContext::nccl_comm() const { return impl_->GetNcclComm(); }
void GPUContext::set_nccl_comm(ncclComm_t comm) { impl_->SetNcclComm(comm); }

void GPUContext::Init() {
-impl_->allocator_ = const_cast<Allocator*>(&this->GetAllocator());
+impl_->allocator_ = const_cast<Allocator*>(&this->GetAllocator()); // NOLINT
impl_->Init();
}

void GPUContext::SetStream(gpuStream_t stream) {
-impl_->allocator_ = const_cast<Allocator*>(&this->GetAllocator());
+impl_->allocator_ = const_cast<Allocator*>(&this->GetAllocator()); // NOLINT
impl_->SetStream(stream);
}

void GPUContext::SetCUDAStream(CUDAStream* stream, bool clear) {
-impl_->allocator_ = const_cast<Allocator*>(&this->GetAllocator());
+impl_->allocator_ = const_cast<Allocator*>(&this->GetAllocator()); // NOLINT
impl_->SetCUDAStream(stream, clear);
}

@@ -1006,7 +1006,7 @@ void GPUContext::PartialInitWithoutAllocator(int stream_priority) {
}

void GPUContext::PartialInitWithAllocator() {
-impl_->allocator_ = const_cast<Allocator*>(&this->GetAllocator());
+impl_->allocator_ = const_cast<Allocator*>(&this->GetAllocator()); // NOLINT
impl_->PartialInitWithAllocator();
}

12 changes: 8 additions & 4 deletions paddle/phi/core/device_context.cc
@@ -176,8 +176,10 @@ struct DeviceContext::Impl {
allocator = cuda_graph_allocator_;
}
#endif
-return tensor->AllocateFrom(
-    const_cast<Allocator*>(allocator), dtype, requested_size, fake_alloc);
+return tensor->AllocateFrom(const_cast<Allocator*>(allocator),
+                            dtype,
+                            requested_size,
+                            fake_alloc); // NOLINT
}

template <typename T>
@@ -218,8 +220,10 @@
(fake_alloc || tensor->numel() == 0) && requested_size == 0
? host_zero_allocator_
: host_allocator_;
-return tensor->AllocateFrom(
-    const_cast<Allocator*>(allocator), dtype, requested_size, fake_alloc);
+return tensor->AllocateFrom(const_cast<Allocator*>(allocator),
+                            dtype,
+                            requested_size,
+                            fake_alloc); // NOLINT
}

template <typename T>
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/cumprod_grad_kernel.cc
@@ -56,10 +56,10 @@ void CumprodGradKernel(const Context& dev_ctx,
Allocator::AllocationPtr x_conj;
Allocator::AllocationPtr out_conj;
if (phi::IsComplexType(x.dtype())) {
-x_conj = const_cast<Allocator&>(dev_ctx.GetAllocator())
+x_conj = const_cast<Allocator&>(dev_ctx.GetAllocator()) // NOLINT
.Allocate(numel * sizeof(T));
auto* x_data_conj = reinterpret_cast<T*>(x_conj->ptr());
-out_conj = const_cast<Allocator&>(dev_ctx.GetAllocator())
+out_conj = const_cast<Allocator&>(dev_ctx.GetAllocator()) // NOLINT
.Allocate(numel * sizeof(T));
auto* out_data_conj = reinterpret_cast<T*>(out_conj->ptr());

6 changes: 3 additions & 3 deletions paddle/phi/kernels/cpu/instance_norm_grad_kernel.cc
@@ -93,9 +93,9 @@ void InstanceNormGradKernel(const Context& dev_ctx,
}

auto scale_e =
-    scale_ptr
-        ? EigenVector<T>::Flatten(*scale_ptr)
-        : EigenVector<T>::Flatten(const_cast<const DenseTensor&>(scale_data));
+    scale_ptr ? EigenVector<T>::Flatten(*scale_ptr)
+              : EigenVector<T>::Flatten(
+                    const_cast<const DenseTensor&>(scale_data)); // NOLINT
auto mean_e = EigenVector<T>::Flatten(saved_mean);
auto inv_var_e = EigenVector<T>::Flatten(saved_variance);
auto dy_e = EigenVector<T>::Flatten(d_y);
14 changes: 7 additions & 7 deletions paddle/phi/kernels/cpu/instance_norm_kernel.cc
@@ -111,14 +111,14 @@ void InstanceNormKernel(const Context& dev_ctx,
set_constant(dev_ctx, &bias_data, static_cast<T>(0));
}
auto scale_e =
-    scale_ptr
-        ? EigenVector<T>::Flatten(*scale_ptr)
-        : EigenVector<T>::Flatten(const_cast<const DenseTensor&>(scale_data));
+    scale_ptr ? EigenVector<T>::Flatten(*scale_ptr)
+              : EigenVector<T>::Flatten(
+                    const_cast<const DenseTensor&>(scale_data)); // NOLINT
auto scale_arr = scale_e.reshape(C_shape);
-auto bias_e =
-    bias_ptr
-        ? EigenVector<T>::Flatten(*bias_ptr)
-        : EigenVector<T>::Flatten(const_cast<const DenseTensor&>(bias_data));
+auto bias_e = bias_ptr
+                  ? EigenVector<T>::Flatten(*bias_ptr)
+                  : EigenVector<T>::Flatten(
+                        const_cast<const DenseTensor&>(bias_data)); // NOLINT
auto bias_arr = bias_e.reshape(C_shape);

dev_ctx.template Alloc<T>(y);
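
Unlike the other casts in this commit, the casts in the two instance_norm files add const (a non-const local tensor is viewed through a const DenseTensor&), which is always safe; the check nevertheless reports any const_cast, hence the NOLINTs. Where only const needs to be added, std::as_const from <utility> (C++17) expresses the same thing without a cast; a sketch:

#include <utility>
#include <vector>

float First(const std::vector<float>& v) { return v.empty() ? 0.0f : v[0]; }

float Demo() {
  std::vector<float> data(4, 1.0f);
  // Adding const via const_cast is legal but still flagged by the check:
  float a = First(const_cast<const std::vector<float>&>(data));  // NOLINT
  // std::as_const conveys the same intent with no cast to suppress:
  float b = First(std::as_const(data));
  return a + b;
}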
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/matrix_rank_tol_kernel.cc
@@ -34,7 +34,7 @@ void LapackSVD(const T* x_data, T* eigenvalues_data, int rows, int cols) {
char jobz = 'N';
int mx = std::max(rows, cols);
int mn = std::min(rows, cols);
-T* a = const_cast<T*>(x_data);
+T* a = const_cast<T*>(x_data); // NOLINT
int lda = rows;
int lwork = 3 * mn + std::max(mx, 7 * mn);
std::vector<T> work(lwork);
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/svd_kernel.cc
@@ -28,7 +28,7 @@ void LapackSvd(
char jobz = full ? 'A' : 'S';
int mx = std::max(rows, cols);
int mn = std::min(rows, cols);
-T* a = const_cast<T*>(X);
+T* a = const_cast<T*>(X); // NOLINT
int lda = rows;
int ldu = rows;
int ldvt = full ? cols : mn;
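
The last two files wrap LAPACK, whose Fortran-style entry points declare every array parameter as mutable, so even a logically read-only const T* input has to be cast before the call. The same constraint in miniature (lapack_gesdd here is a hypothetical binding, not a real symbol):

// Fortran-style bindings take all arrays as mutable pointers.
extern "C" void lapack_gesdd(char jobz, int m, int n, double* a, int lda,
                             double* s, int* info);

void SingularValues(const double* x, int m, int n, double* s) {
  int info = 0;
  // No const overload exists; the cast bridges the signature mismatch.
  lapack_gesdd('N', m, n, const_cast<double*>(x), m, s, &info);  // NOLINT
}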