[ trivial ] Apply clang-format
- The new clang-format standard is applied to the project.

**Self evaluation:**
1. Build test:     [X]Passed [ ]Failed [ ]Skipped
2. Run test:     [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: skykongkong8 <[email protected]>
skykongkong8 authored and myungjoo committed Mar 6, 2025
1 parent 41f8368 commit 6da3c1f
Showing 3 changed files with 43 additions and 43 deletions.
2 changes: 1 addition & 1 deletion nntrainer/tensor/cpu_backend/cpu_backend.h
@@ -14,7 +14,7 @@
#ifndef __CPU_BACKEND_H__
#define __CPU_BACKEND_H__
#ifdef __cplusplus
-#if defined(__aarch64__) || defined(__ARM_ARCH_7A__) || \
+#if defined(__aarch64__) || defined(__ARM_ARCH_7A__) || \
  defined(__ANDROID__) || defined(__arm__)
#include <arm_compute_backend.h>
#elif defined(__x86_64__) || defined(__i586__)
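For readers skimming the hunk above: cpu_backend.h selects the architecture-specific backend header at compile time, so callers include one header and transparently get ARM, x86, or generic kernels. Below is a minimal, self-contained sketch of that conditional-include dispatch pattern; the function names and comments are illustrative placeholders, not nntrainer's actual API.

// dispatch_sketch.cpp -- illustrative only; names are hypothetical.
// The idea mirrors cpu_backend.h: pick one implementation per target
// architecture at preprocessing time, behind a single stable interface.
#include <cstdio>

#if defined(__aarch64__) || defined(__ARM_ARCH_7A__) || defined(__ANDROID__) || defined(__arm__)
static const char *backend_name() { return "arm"; }      // would be NEON kernels
#elif defined(__x86_64__) || defined(__i586__)
static const char *backend_name() { return "x86"; }      // would be SSE/AVX kernels
#else
static const char *backend_name() { return "fallback"; } // portable scalar code
#endif

int main() {
  // Every caller compiles against the same interface; only the definition
  // chosen by the preprocessor differs per build target.
  std::printf("selected backend: %s\n", backend_name());
  return 0;
}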
20 changes: 10 additions & 10 deletions nntrainer/tensor/cpu_backend/fallback/fallback_internal.cpp
@@ -20,16 +20,16 @@
#include <stdexcept>
#include <tensor_dim.h>

-#define sgemv_loop(ci, cj, cM, cN) \
-  do { \
-    float y0; \
-    unsigned int i, j; \
-    for (ci = 0; ci != cM; ci++) { \
-      y0 = Y[ci * incY] * beta; \
-      for (cj = 0; cj != cN; cj++) \
-        y0 += A[i + j * lda] * X[cj * incX]; \
-      Y[ci * incY] = y0; \
-    } \
+#define sgemv_loop(ci, cj, cM, cN) \
+  do { \
+    float y0; \
+    unsigned int i, j; \
+    for (ci = 0; ci != cM; ci++) { \
+      y0 = Y[ci * incY] * beta; \
+      for (cj = 0; cj != cN; cj++) \
+        y0 += A[i + j * lda] * X[cj * incX]; \
+      Y[ci * incY] = y0; \
+    } \
  } while (0);
namespace nntrainer {

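A note on how the sgemv_loop hunk above is meant to be used: the macro declares its own loop counters i and j and indexes A with both, so the caller is expected to pass its own i/j indices as the ci/cj arguments (in one order or the other depending on the transpose flag). The wrapper and test below are a hypothetical, self-contained sketch of that instantiation for a column-major, non-transposed matrix; only the macro itself comes from the diff. The hgemv_loop macro in the fp16 file further down follows the same pattern, only casting between _FP16 and float around the arithmetic.

// sgemv_sketch.cpp -- hypothetical wrapper; scalar_sgemv is not nntrainer's API.
#include <cstdio>
#include <vector>

#define sgemv_loop(ci, cj, cM, cN) \
  do { \
    float y0; \
    unsigned int i, j; \
    for (ci = 0; ci != cM; ci++) { \
      y0 = Y[ci * incY] * beta; \
      for (cj = 0; cj != cN; cj++) \
        y0 += A[i + j * lda] * X[cj * incX]; \
      Y[ci * incY] = y0; \
    } \
  } while (0);

// Y <- A * X + beta * Y for an M x N column-major A (lda = leading dimension),
// obtained by binding ci = i (rows) and cj = j (columns) in the macro.
static void scalar_sgemv(const float *A, const float *X, float *Y,
                         unsigned int M, unsigned int N, unsigned int lda,
                         float beta, unsigned int incX, unsigned int incY) {
  sgemv_loop(i, j, M, N);
}

int main() {
  // Column-major 2x2 A = [[1, 2], [3, 4]], X = [1, 1]  =>  A * X = [3, 7].
  std::vector<float> A = {1, 3, 2, 4}, X = {1, 1}, Y = {0, 0};
  scalar_sgemv(A.data(), X.data(), Y.data(), 2, 2, 2, 0.0f, 1, 1);
  std::printf("%g %g\n", Y[0], Y[1]); // prints: 3 7
  return 0;
}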
64 changes: 32 additions & 32 deletions nntrainer/tensor/cpu_backend/fallback/fallback_internal_fp16.cpp
@@ -21,42 +21,42 @@
#include <stdexcept>
#include <tensor_dim.h>

-#define hgemv_loop(ci, cj, cM, cN) \
-  do { \
-    float y0; \
-    unsigned int i, j; \
-    for (ci = 0; ci != cM; ci++) { \
-      y0 = static_cast<float>(Y[ci * incY] * static_cast<_FP16>(beta)); \
-      for (cj = 0; cj != cN; cj++) \
-        y0 += static_cast<float>(A[i + j * lda] * X[cj * incX]); \
-      Y[ci * incY] = static_cast<_FP16>(y0); \
-    } \
+#define hgemv_loop(ci, cj, cM, cN) \
+  do { \
+    float y0; \
+    unsigned int i, j; \
+    for (ci = 0; ci != cM; ci++) { \
+      y0 = static_cast<float>(Y[ci * incY] * static_cast<_FP16>(beta)); \
+      for (cj = 0; cj != cN; cj++) \
+        y0 += static_cast<float>(A[i + j * lda] * X[cj * incX]); \
+      Y[ci * incY] = static_cast<_FP16>(y0); \
+    } \
  } while (0);

-#define hgemm_loop() \
-  do { \
-    for (unsigned int m = 0; m < M; ++m) { \
-      for (unsigned int n = 0; n < N; ++n) { \
-        float c = 0; \
-        _FP16 c_old = C[m * ldc + n]; \
-        for (unsigned int k = 0; k < K; ++k) { \
-          _FP16 a, b; \
-          a = ((TransA) ? A[k * lda + m] : A[m * lda + k]); \
-          b = ((TransB) ? B[n * ldb + k] : B[k * ldb + n]); \
-          c += static_cast<float>(a * b); \
-        } \
-        C[m * ldc + n] = static_cast<_FP16>(alpha * c); \
-        if (beta != 0.0) \
-          C[m * ldc + n] += static_cast<_FP16>(beta) * c_old; \
-      } \
-    } \
+#define hgemm_loop() \
+  do { \
+    for (unsigned int m = 0; m < M; ++m) { \
+      for (unsigned int n = 0; n < N; ++n) { \
+        float c = 0; \
+        _FP16 c_old = C[m * ldc + n]; \
+        for (unsigned int k = 0; k < K; ++k) { \
+          _FP16 a, b; \
+          a = ((TransA) ? A[k * lda + m] : A[m * lda + k]); \
+          b = ((TransB) ? B[n * ldb + k] : B[k * ldb + n]); \
+          c += static_cast<float>(a * b); \
+        } \
+        C[m * ldc + n] = static_cast<_FP16>(alpha * c); \
+        if (beta != 0.0) \
+          C[m * ldc + n] += static_cast<_FP16>(beta) * c_old; \
+      } \
+    } \
  } while (0);
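To make the hgemm_loop hunk above easier to follow: it is a naive triple loop that reads half-precision operands, accumulates each output element in a 32-bit float to limit rounding error, then scales by alpha and adds beta times the old value only when beta is nonzero. The function below is a hypothetical, portable rendering of what the macro expands to; _FP16 is replaced by a float stand-in (half_t) purely so the sketch compiles on any host, and the function name is not part of nntrainer.

// hgemm_sketch.cpp -- illustrative rendering of the macro as a plain function.
#include <cstdio>

// Stand-in for the project's _FP16 typedef (__fp16/_Float16 on ARM builds);
// float is used here only so the sketch builds everywhere.
using half_t = float;

// C <- alpha * op(A) * op(B) + beta * C, accumulating each element in float.
static void naive_hgemm(bool TransA, bool TransB, unsigned int M,
                        unsigned int N, unsigned int K, float alpha,
                        const half_t *A, unsigned int lda, const half_t *B,
                        unsigned int ldb, float beta, half_t *C,
                        unsigned int ldc) {
  for (unsigned int m = 0; m < M; ++m) {
    for (unsigned int n = 0; n < N; ++n) {
      float c = 0;                    // wide accumulator, as in hgemm_loop()
      half_t c_old = C[m * ldc + n];
      for (unsigned int k = 0; k < K; ++k) {
        half_t a = TransA ? A[k * lda + m] : A[m * lda + k];
        half_t b = TransB ? B[n * ldb + k] : B[k * ldb + n];
        c += static_cast<float>(a * b);
      }
      C[m * ldc + n] = static_cast<half_t>(alpha * c);
      if (beta != 0.0f) // as in the macro, beta is applied only when nonzero
        C[m * ldc + n] += static_cast<half_t>(beta) * c_old;
    }
  }
}

int main() {
  // 1x1 smoke test: C = 2 * (3 * 4) + 1 * 5 = 29.
  half_t A[1] = {3}, B[1] = {4}, C[1] = {5};
  naive_hgemm(false, false, 1, 1, 1, /*alpha=*/2.0f, A, 1, B, 1,
              /*beta=*/1.0f, C, 1);
  std::printf("%g\n", static_cast<float>(C[0])); // prints: 29
  return 0;
}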

-#define haxpy_loop() \
-  do { \
-    unsigned int i; \
-    for (i = 0; i < N; ++i) \
-      Y[i * incY] = Y[i * incY] + static_cast<_FP16>(alpha) * X[i * incX]; \
+#define haxpy_loop() \
+  do { \
+    unsigned int i; \
+    for (i = 0; i < N; ++i) \
+      Y[i * incY] = Y[i * incY] + static_cast<_FP16>(alpha) * X[i * incX]; \
  } while (0);

namespace nntrainer {
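Finally, haxpy_loop above is the half-precision AXPY kernel: Y[i] += alpha * X[i] over N strided elements, with alpha cast to the half type before the multiply. A tiny, hypothetical function-form sketch follows, again with a float stand-in for _FP16 so it builds anywhere.

// haxpy_sketch.cpp -- illustrative only; scalar_haxpy is not nntrainer's API.
#include <cstdio>

using half_t = float; // stand-in for _FP16 so the sketch compiles anywhere

// Y <- Y + alpha * X over N strided elements, mirroring haxpy_loop().
static void scalar_haxpy(unsigned int N, float alpha, const half_t *X,
                         unsigned int incX, half_t *Y, unsigned int incY) {
  for (unsigned int i = 0; i < N; ++i)
    Y[i * incY] = Y[i * incY] + static_cast<half_t>(alpha) * X[i * incX];
}

int main() {
  half_t X[3] = {1, 2, 3}, Y[3] = {10, 10, 10};
  scalar_haxpy(3, 0.5f, X, 1, Y, 1);
  std::printf("%g %g %g\n", Y[0], Y[1], Y[2]); // prints: 10.5 11 11.5
  return 0;
}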
