Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

#22890 Fix profiling on empty Optional #22891

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions onnxruntime/core/framework/sequential_executor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ static void CalculateTotalOutputSizes(OpKernelContextInternal* op_kernel_context
int output_count = op_kernel_context->OutputCount();
for (auto i = 0; i < output_count; i++) {
const OrtValue* p_output = op_kernel_context->GetOutputMLValue(i);
if (p_output != nullptr && p_output->IsTensor()) {
if (p_output != nullptr && p_output->IsTensor() && p_output->IsAllocated()) {
const auto& tensor = p_output->Get<Tensor>();
size_t tensor_size = tensor.SizeInBytes();
#if defined(TRACE_EXECUTION)
Expand Down Expand Up @@ -104,7 +104,7 @@ static void CalculateTotalInputSizes(const OpKernelContextInternal* op_kernel_co
const int input_count = op_kernel_context->InputCount();
for (auto i = 0; i < input_count; i++) {
const OrtValue* p_input = op_kernel_context->GetInputMLValue(i);
if (p_input != nullptr && p_input->IsTensor()) {
if (p_input != nullptr && p_input->IsTensor() && p_input->IsAllocated()) {
const OpKernelInfo& op_kernel_info = p_op_kernel->Info();
const Tensor* p_tensor = nullptr;
bool is_param = op_kernel_info.TryGetConstantInput(i, &p_tensor);
Expand Down
41 changes: 41 additions & 0 deletions onnxruntime/test/framework/inference_session_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -818,6 +818,47 @@ TEST(InferenceSessionTests, CheckRunProfilerStartTime) {
ASSERT_TRUE(before_start_time <= profiling_start_time && profiling_start_time <= after_start_time);
}

TEST(InferenceSessionTests, CheckRunProfilerWithOptionalValues) {
  // Regression test: the profiler must not crash when a model produces
  // optional (possibly unallocated) OrtValues while profiling is enabled.
  SessionOptions session_options;
  session_options.session_logid = "CheckRunProfiler";
  session_options.enable_profiling = true;
  session_options.profile_file_prefix = ORT_TSTR("onnxprofile_profile_test");

  InferenceSession session{session_options, GetEnvironment()};
  ASSERT_STATUS_OK(session.Load(ORT_TSTR("testdata/relu_with_optional.onnx")));
  ASSERT_STATUS_OK(session.Initialize());

  RunOptions run_options;
  run_options.run_tag = "RunTag";

  // Single scalar-ish input: shape {1}, value -4.
  std::vector<int64_t> input_dims{1};
  std::vector<int> input_values{-4};
  OrtValue input_value;
  CreateMLValue<int>(TestCPUExecutionProvider()->CreatePreferredAllocators()[0],
                     input_dims, input_values, &input_value);
  NameMLValMap feeds;
  feeds.emplace("input", input_value);

  // Request the single graph output.
  std::vector<std::string> output_names{"output"};
  std::vector<OrtValue> fetches;

  // Relu(-4) == 0, shape {1}.
  std::vector<int64_t> expected_dims{1};
  std::vector<int> expected_values{0};

  // Execute the model; log the error message on failure to aid debugging.
  const common::Status status = session.Run(run_options, feeds, output_names, &fetches);
  if (!status.IsOK()) {
    std::cout << "Run returned status: " << status.ErrorMessage() << std::endl;
  }
  ASSERT_TRUE(status.IsOK());
  VerifyOutputs<int>(fetches.at(0).Get<Tensor>(), expected_dims, expected_values);
}

TEST(InferenceSessionTests, MultipleSessionsNoTimeout) {
SessionOptions session_options;

Expand Down
Binary file added onnxruntime/test/testdata/relu_with_optional.onnx
Binary file not shown.
Loading