Skip to content

Commit f9fb4be

Browse files
committed
translate comments to Eng
1 parent fd570e3 commit f9fb4be

File tree

3 files changed

+26
-30
lines changed

3 files changed

+26
-30
lines changed

tensorflow/lite/micro/kernels/one_hot.cc

Lines changed: 17 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -30,21 +30,22 @@ constexpr int kOnValueTensor = 2;
3030
constexpr int kOffValueTensor = 3;
3131
constexpr int kOutputTensor = 0;
3232

33-
namespace { // 로컬 유틸 함수들
33+
namespace { // Local utility functions
3434
inline int NumElements(const TfLiteEvalTensor* t) {
3535
int count = 1;
36-
// TfLiteEvalTensor의 dims는 TfLiteIntArray* 타입입니다.
3736
for (int i = 0; i < t->dims->size; ++i) {
3837
count *= t->dims->data[i];
3938
}
4039
return count;
4140
}
4241
} // namespace
4342

44-
// TfLiteNode에서 입력 (indices, depth, on_value, off_value) 및 출력 텐서
45-
// (output) 를 가져옴 params->axis 를 읽어 실제로 Depth 차원이 들어갈 위치
46-
// (Axis) 계산 Prepare과 Eval 함수 내에서 잠시 생성되었다가 사라짐 → Stack
47-
// memory 사용 효율적
43+
// Retrieves the input tensors (indices, depth, on_value, off_value) and the
44+
// output tensor (output) from the TfLiteNode.
45+
// Reads params->axis to compute the actual position (axis) where the depth
46+
// dimension will be inserted.
47+
// These values are created temporarily within the Prepare and Eval functions
48+
// and are destroyed afterward → efficient use of stack memory.
4849
struct OneHotContext {
4950
OneHotContext(TfLiteContext* context, TfLiteNode* node) {
5051
indices = tflite::micro::GetEvalInput(context, node, kIndicesTensor);
@@ -62,7 +63,7 @@ struct OneHotContext {
6263
}
6364

6465
const TfLiteEvalTensor* indices;
65-
const TfLiteEvalTensor* depth; // 새로 생기는 One-hot 차원 크기
66+
const TfLiteEvalTensor* depth;
6667
const TfLiteEvalTensor* on_value;
6768
const TfLiteEvalTensor* off_value;
6869
TfLiteEvalTensor* output;
@@ -72,8 +73,7 @@ struct OneHotContext {
7273
TfLiteType dtype;
7374
};
7475

75-
// 실제 연산 수행 함수
76-
//
76+
// Function that performs the actual one-hot computation.
7777
template <typename T, typename TI>
7878
void OneHotComputeImpl(const OneHotContext& op_context) {
7979
int prefix_dim_size = 1;
@@ -121,20 +121,17 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
121121
const OneHotContext& op_context) {
122122
TF_LITE_ENSURE(context, *op_context.depth->data.i32 >= 0);
123123

124-
// depth 데이터 읽기
124+
// Read the depth value.
125125
const int depth_val =
126126
*tflite::micro::GetTensorData<int32_t>(op_context.depth);
127127
TF_LITE_ENSURE(context, depth_val >= 0);
128128

129-
// Output Tensor 검증
129+
// Output tensor validation
130130
TF_LITE_ENSURE(context, op_context.output != nullptr);
131131

132132
TF_LITE_ENSURE(context, op_context.output->dims != nullptr);
133133

134-
// todo
135-
// TFLM에서는 Output Tensor의 dims가 이미 할당되어 있다고 가정합니다.
136-
// 하지만 모델이 생성될 때 계산된 dims와 현재 depth값으로 계산한 dims가
137-
// 일치하는지 확인은 필요합니다.
134+
// TFLM assumes that the output tensor's dims are already allocated.
138135
const int expected_dims_size = op_context.output_dims;
139136
TF_LITE_ENSURE_EQ(context, op_context.output->dims->size, expected_dims_size);
140137

@@ -148,8 +145,8 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
148145
expected_dim_i = op_context.indices->dims->data[i - 1];
149146
}
150147

151-
// TFLM 컴파일러(Offline Memory Planner)가 할당해둔 크기와 실제 계산 크기가
152-
// 다르면 에러
148+
// If the size pre-allocated by the TFLM compiler (Offline Memory Planner)
149+
// does not match the actual computed size, an error is raised.
153150
TF_LITE_ENSURE_EQ(context, op_context.output->dims->data[i],
154151
expected_dim_i);
155152
}
@@ -191,8 +188,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
191188
TF_LITE_ENSURE_TYPES_EQ(context, op_context.off_value->type,
192189
op_context.dtype);
193190

194-
// depth 텐서가 상수가 아니더라도, 테스트에서는 output shape를
195-
// 미리 지정해 두었으므로 여기서는 그냥 검증만 수행
191+
// Even if the depth tensor is not a constant, the test predefines the output
192+
// shape, so here we only perform validation.
196193
return ResizeOutputTensor(context, op_context);
197194
}
198195

@@ -227,7 +224,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
227224

228225
} // namespace one_hot
229226

230-
// 헤더에 선언된 Register_ONE_HOT 구현
227+
// Implementation of Register_ONE_HOT declared in the header
231228
const TFLMRegistration* Register_ONE_HOT() {
232229
static TFLMRegistration r = {};
233230

tensorflow/lite/micro/kernels/one_hot.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ namespace tflite {
88
namespace ops {
99
namespace micro {
1010

11-
// ONE_HOT 커널 등록 함수 (all_ops_resolver 등에서 사용)
11+
// ONE_HOT kernel registration function (used by all_ops_resolver, etc.)
1212
const TFLMRegistration* Register_ONE_HOT();
1313

1414
} // namespace micro

tensorflow/lite/micro/one_hot_test.cc

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -17,14 +17,14 @@ namespace tflite {
1717
namespace testing {
1818
namespace {
1919

20-
// OneHot 연산 테스트를 위한 헬퍼 함수
20+
// Helper function for OneHot operation test
2121
template <typename T>
2222
void TestOneHot(const int* indices_dims, const int32_t* indices_data,
2323
const int* depth_dims, const int32_t* depth_data,
2424
const int* on_dims, const T* on_data, const int* off_dims,
2525
const T* off_data, const int* output_dims,
2626
const T* expected_output_data, T* output_data, int axis = -1) {
27-
// 1. 텐서 설정
27+
// 1. Tensor setup
2828
TfLiteIntArray* in_dims = IntArrayFromInts(indices_dims);
2929
TfLiteIntArray* d_dims = IntArrayFromInts(depth_dims);
3030
TfLiteIntArray* on_val_dims = IntArrayFromInts(on_dims);
@@ -33,26 +33,25 @@ void TestOneHot(const int* indices_dims, const int32_t* indices_data,
3333

3434
const int output_dims_count = ElementCount(*out_dims);
3535

36-
// 2. 입력 텐서 생성
36+
// 2. Create the input tensors
3737
constexpr int inputs_size = 4;
3838
constexpr int outputs_size = 1;
3939
constexpr int tensors_size = inputs_size + outputs_size;
4040
TfLiteTensor tensors[tensors_size] = {
4141
CreateTensor(indices_data, in_dims), CreateTensor(depth_data, d_dims),
4242
CreateTensor(on_data, on_val_dims), CreateTensor(off_data, off_val_dims),
43-
CreateTensor(output_data, out_dims), // 출력 텐서 (데이터는 비워둠)
43+
CreateTensor(output_data, out_dims), // Output Tensor
4444
};
4545

46-
// 3. 파라미터 설정
46+
// 3. Parameter setting
4747
TfLiteOneHotParams builtin_data = {axis};
4848

49-
// 4. KernelRunner 실행
49+
// 4. KernelRunner execution
5050
int inputs_array_data[] = {4, 0, 1, 2, 3}; // indices, depth, on, off
5151
TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data);
5252
int outputs_array_data[] = {1, 4}; // output tensor index
5353
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
5454

55-
// 등록 함수 이름은 구현하신 이름으로 변경 (예:
5655
// tflite::ops::micro::Register_ONE_HOT)
5756
const TFLMRegistration registration = *tflite::ops::micro::Register_ONE_HOT();
5857
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
@@ -62,7 +61,7 @@ void TestOneHot(const int* indices_dims, const int32_t* indices_data,
6261
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
6362
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
6463

65-
// 5. 결과 검증
64+
// 5. Result validation
6665
for (int i = 0; i < output_dims_count; ++i) {
6766
TF_LITE_MICRO_EXPECT_EQ(expected_output_data[i], output_data[i]);
6867
}
@@ -93,7 +92,7 @@ TF_LITE_MICRO_TEST(OneHot_BasicInt32) {
9392
const int output_dims[] = {2, 3, 3};
9493
const int32_t expected_output[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
9594

96-
int32_t output_data[9]; // 결과 받을 버퍼
95+
int32_t output_data[9];
9796

9897
tflite::testing::TestOneHot(indices_dims, indices_data, depth_dims,
9998
depth_data, on_dims, on_data, off_dims, off_data,

0 commit comments

Comments
 (0)