[coverity] fix uninitialized variable issues
Fixed several uninitialized member and local variable issues reported by Coverity.

**Self evaluation:**

Build test: [x]Passed [ ]Failed [ ]Skipped
Run test: [x]Passed [ ]Failed [ ]Skipped

Signed-off-by: Seungbaek Hong <[email protected]>
baek2sm committed Feb 12, 2025
1 parent df8444a commit f37f93c
Showing 9 changed files with 25 additions and 11 deletions.
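Every change follows the same pattern: constructors that previously left some members unset now assign them explicit values in the member initializer list, so Coverity no longer flags potential reads of uninitialized data. A minimal sketch of the before/after pattern, using illustrative names rather than the actual nntrainer classes:

```cpp
#include <cstdint>

// Illustrative only -- not nntrainer code. Shows the pattern applied in this
// commit: give every plain-old-data member a deterministic value in the
// constructor's member initializer list.
class QuantParams {
public:
  // Before: members were left indeterminate, so reading them prior to a
  // later assignment was undefined behavior (and a Coverity finding).
  //   QuantParams() {}

  // After: every member is initialized on every construction path.
  QuantParams() : scale(1.0f), zero_point(0), is_ready(false) {}

  float scale;
  int8_t zero_point;
  bool is_ready;
};
```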
8 changes: 6 additions & 2 deletions nntrainer/graph/network_graph.h
@@ -51,7 +51,9 @@ class NetworkGraph {
  optimize_memory(true),
  exec_mode(ExecutionMode::TRAIN),
  tensor_format("NCHW"),
- tensor_dtype(split("FP32-FP32", getRegex("\\-"))) {
+ tensor_dtype(split("FP32-FP32", getRegex("\\-"))),
+ is_clip_grad(false),
+ loss_scale(1.0f) {
  nan_count = 0;
  }

@@ -80,7 +82,9 @@ class NetworkGraph {
  optimize_memory(true),
  exec_mode(mode),
  tensor_format(tensor_format_),
- tensor_dtype(split(tensor_dtype_, getRegex("\\-"))) {
+ tensor_dtype(split(tensor_dtype_, getRegex("\\-"))),
+ is_clip_grad(false),
+ loss_scale(1.0f) {
  nan_count = 0;
  }

3 changes: 2 additions & 1 deletion nntrainer/layers/layer_node.cpp
@@ -229,7 +229,8 @@ LayerNode::LayerNode(std::unique_ptr<nntrainer::Layer> &&l) :
  loss(new props::Loss()),
  regularization_loss(0.0f),
  exec_order({0, 0, 0, 0}),
- needs_restore_data(false) {
+ needs_restore_data(false),
+ data_type({TensorDim::DataType::FP32, TensorDim::DataType::FP32}) {
  if (layer && layer->getType() == TimeDistLayer::type) {
    std::get<props::Distribute>(*layer_node_props).set(true);
  }
2 changes: 1 addition & 1 deletion nntrainer/tensor/char_tensor.cpp
@@ -18,7 +18,7 @@
  namespace nntrainer {

  CharTensor::CharTensor(std::string name_, Tformat fm, QScheme qscheme_) :
-   TensorBase(name_, fm, Tdatatype::QINT8) {}
+   TensorBase(name_, fm, Tdatatype::QINT8), qscheme(qscheme_) {}

  CharTensor::CharTensor(const TensorDim &d, bool alloc_now, Initializer init,
                         std::string name, QScheme qscheme_) :
3 changes: 2 additions & 1 deletion nntrainer/tensor/char_tensor.h
@@ -68,7 +68,8 @@ class CharTensor : public TensorBase {
   * @brief Construct a new CharTensor object
   * @param rhs TensorBase object to copy
   */
- CharTensor(TensorBase &rhs) : TensorBase(rhs) {}
+ CharTensor(TensorBase &rhs) :
+   TensorBase(rhs), qscheme(QScheme::PER_TENSOR_AFFINE) {}

  /**
   * @brief Basic Destructor
4 changes: 2 additions & 2 deletions nntrainer/tensor/int4_tensor.cpp
@@ -18,7 +18,7 @@
  namespace nntrainer {

  Int4QTensor::Int4QTensor(std::string name_, Tformat fm, QScheme qscheme_) :
-   TensorBase(name_, fm, Tdatatype::QINT4) {}
+   TensorBase(name_, fm, Tdatatype::QINT4), qscheme(qscheme_) {}

  Int4QTensor::Int4QTensor(const TensorDim &d, bool alloc_now, Initializer init,
                           std::string name, QScheme qscheme_) :
@@ -339,7 +339,7 @@ std::vector<unsigned int> Int4QTensor::argmax() const {

  for (unsigned int b = 0; b < batch_size; ++b) {
    int8_t curr_val, max_val = -8;
-   unsigned int max_element_idx;
+   unsigned int max_element_idx = 0;
    for (unsigned int idx = 0; idx < feature_len; ++idx) {
      curr_val = getValue(idx + b * feature_len);

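For context on the `argmax()` change above: `max_val` starts at -8, the smallest value representable in signed 4 bits, so if every element equals that minimum the running maximum is never updated and the old, uninitialized `max_element_idx` could be returned as-is. Initializing it to 0 guarantees a valid index in that corner case. A simplified, self-contained sketch of the loop (assumed semantics, not the actual nntrainer implementation):

```cpp
#include <cstdint>
#include <vector>

// Simplified sketch of the argmax corner case (illustrative, not nntrainer code).
// If all inputs equal -8, the comparison below never fires, so the `= 0`
// initialization is what keeps the returned index well defined.
unsigned int argmax_int4(const std::vector<int8_t> &values) {
  int8_t max_val = -8;              // minimum of the signed 4-bit range
  unsigned int max_element_idx = 0; // deterministic fallback index
  for (unsigned int idx = 0; idx < values.size(); ++idx) {
    int8_t curr_val = values[idx];
    if (curr_val > max_val) {
      max_val = curr_val;
      max_element_idx = idx;
    }
  }
  return max_element_idx;
}
```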
3 changes: 2 additions & 1 deletion nntrainer/tensor/int4_tensor.h
@@ -74,7 +74,8 @@ class Int4QTensor : public TensorBase {
   * @brief Construct a new Int4QTensor object
   * @param rhs TensorBase object to copy
   */
- Int4QTensor(TensorBase &rhs) : TensorBase(rhs) {}
+ Int4QTensor(TensorBase &rhs) :
+   TensorBase(rhs), qscheme(QScheme::PER_TENSOR_AFFINE) {}

  /**
   * @brief Basic Destructor
7 changes: 6 additions & 1 deletion nntrainer/tensor/quantizer.h
@@ -244,7 +244,12 @@ class PerChannelAffineQuantizer : public UniformQuantizer {
  /**
   * @brief Basic Constructor of a PerChannelAffineQuantizer
   */
- PerChannelAffineQuantizer() : UniformQuantizer() {}
+ PerChannelAffineQuantizer() :
+   UniformQuantizer(),
+   scales(nullptr),
+   zero_points(nullptr),
+   quant_min(0),
+   quant_max(0) {}

  /**
   * @copydoc Quantizer::create()
3 changes: 2 additions & 1 deletion nntrainer/tensor/short_tensor.h
@@ -61,7 +61,8 @@ class ShortTensor : public TensorBase {
   * @brief Construct a new ShortTensor object
   * @param rhs TensorBase object to copy
   */
- ShortTensor(TensorBase &rhs) : TensorBase(rhs) {}
+ ShortTensor(TensorBase &rhs) :
+   TensorBase(rhs), qscheme(QScheme::PER_TENSOR_AFFINE) {}

  /**
   * @brief Basic Destructor
3 changes: 2 additions & 1 deletion nntrainer/tensor/uint_tensor.h
@@ -69,7 +69,8 @@ template <typename T> class UIntTensor : public TensorBase {
   * @brief Construct a new UIntTensor object
   * @param rhs TensorBase object to copy
   */
- UIntTensor(TensorBase &rhs) : TensorBase(rhs) {}
+ UIntTensor(TensorBase &rhs) :
+   TensorBase(rhs), qscheme(QScheme::PER_TENSOR_AFFINE) {}

  /**
   * @brief Basic Destructor
