Skip to content

Commit dd0a66c

Browse files
committed
[GPU] Fix coverity issues
1 parent bbed74e commit dd0a66c

File tree

5 files changed

+18
-7
lines changed

5 files changed

+18
-7
lines changed

src/plugins/intel_gpu/src/graph/common_utils/jit_term.hpp

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -213,7 +213,9 @@ inline JitTerm operator/(const JitTerm& lhs, const JitTerm& rhs) {
213213
return lhs;
214214
}
215215
if (is_number(lhs) && is_number(rhs)) {
216-
return JitTerm{std::to_string(as_number<int64_t>(lhs) / as_number<int64_t>(rhs))};
216+
auto rhs_val = as_number<int64_t>(rhs);
217+
OPENVINO_ASSERT(rhs_val != 0, "Division by zero detected in operator/");
218+
return JitTerm{std::to_string(as_number<int64_t>(lhs) / rhs_val)};
217219
}
218220
return JitTerm{"(" + lhs.str() + " / " + rhs.str() + ")"};
219221
}
@@ -224,7 +226,9 @@ inline JitTerm operator%(const JitTerm& lhs, const JitTerm& rhs) {
224226
}
225227

226228
if (is_number(lhs) && is_number(rhs)) {
227-
return JitTerm{std::to_string(as_number<int64_t>(lhs) % as_number<int64_t>(rhs))};
229+
auto rhs_val = as_number<int64_t>(rhs);
230+
OPENVINO_ASSERT(rhs_val != 0, "Modulo by zero detected in operator%");
231+
return JitTerm{std::to_string(as_number<int64_t>(lhs) % rhs_val)};
228232
}
229233

230234
return JitTerm{"(" + lhs.str() + " % " + rhs.str() + ")"};

src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -781,6 +781,8 @@ void crop_in_place_optimization::update_in_place_crop_padding_simple_data_format
781781
std::vector<ov::Dimension::value_type> reshape_upper_sizes(output_rank, 0);
782782
padding::DynamicDimsMask reshape_dyn_pad_mask;
783783

784+
OPENVINO_ASSERT(reshape_axis >= 0 && static_cast<size_t>(reshape_axis) < output_rank, "[GPU] Calculated reshape_axis is out of range.");
785+
784786
reshape_lower_sizes[reshape_axis] = lower_sizes[crop_axis];
785787
reshape_upper_sizes[reshape_axis] = upper_sizes[crop_axis];
786788
reshape_dyn_pad_mask[reshape_axis] = 1;

src/plugins/intel_gpu/src/graph/impls/ocl/convolution.cpp

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -173,9 +173,12 @@ struct convolution_impl : typed_primitive_impl_ocl<convolution> {
173173
&& cp.weights.X().v == 1 && cp.weights.Y().v > 1
174174
&& !(cp.groups == cp.inputs[0].Feature().v && cp.inputs[0].Feature().v == cp.outputs[0].Feature().v)) {
175175
auto can_swap = [](const kernel_selector::Tensor::DataTensor& dt) -> bool {
176-
auto x_channel_idx = static_cast<uint32_t>(kernel_selector::Tensor::DataTensor::Channelndex(dt.GetLayout(),
177-
kernel_selector::Tensor::DataChannelName::X));
178-
auto x_axis_dim = dt.GetDims()[x_channel_idx];
176+
auto x_channel_idx = kernel_selector::Tensor::DataTensor::Channelndex(dt.GetLayout(),
177+
kernel_selector::Tensor::DataChannelName::X);
178+
if (x_channel_idx < 0) {
179+
return false;
180+
}
181+
auto x_axis_dim = dt.GetDims()[static_cast<size_t>(x_channel_idx)];
179182
return (x_axis_dim.pad.Total() == 0 && x_axis_dim.v == 1);
180183
};
181184

src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_base.cpp

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -182,8 +182,9 @@ KernelsData ConcatenationKernelBase::GetCommonKernelsData(const Params& params)
182182
s.v.u32 = lastOffset;
183183
kernel.params.scalars.push_back(s);
184184
kernel.params.arguments.push_back({ArgumentDescriptor::Types::SCALAR, 0});
185-
size_t concatChannelIndex = (size_t)DataTensor::Channelndex(orgParams.inputs[i].GetLayout(), GetConcatChannel(orgParams));
186-
lastOffset += (uint32_t)input.GetDims()[concatChannelIndex].v;
185+
auto concatChannelIndex = DataTensor::Channelndex(orgParams.inputs[i].GetLayout(), GetConcatChannel(orgParams));
186+
OPENVINO_ASSERT(concatChannelIndex > -1, "[GPU] Invalid concatenation channel index found.");
187+
lastOffset += (uint32_t)input.GetDims()[static_cast<size_t>(concatChannelIndex)].v;
187188
}
188189

189190
return {kd};

src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -718,6 +718,7 @@ std::vector<cldnn::event::ptr> SyncInferRequest::prepare_batched_input(size_t in
718718
cldnn::mem_lock<uint8_t> dst_lock(merged_memory, stream);
719719
for (size_t i = 0; i < user_tensors.size(); i++) {
720720
auto input_tensor = std::dynamic_pointer_cast<RemoteTensorImpl>(user_tensors[i]._ptr);
721+
OPENVINO_ASSERT(input_tensor != nullptr, "[GPU] Failed to cast user tensor to RemoteTensorImpl");
721722
cldnn::mem_lock<uint8_t> src_lock(input_tensor->get_memory(), stream);
722723
std::memcpy(dst_lock.data() + i * input_tensor->get_byte_size(), src_lock.data(), input_tensor->get_byte_size());
723724
}

0 commit comments

Comments (0)