Skip to content

Commit

Permalink
[QNN EP] Re-enable several disabled QNN-EP UTs
Browse files Browse the repository at this point in the history
### Description
1. Re-enable UTs which pass with QNN SDK 2.30
2. Fix conv and resize UTs
   - Make conv's weight an initializer so that graph.NumberOfNodes()
     matches ep_nodes, which should be 1.
   - Update resize UT because "round_prefer_floor" is no longer
     supported as of QNN SDK 2.21.

### Motivation and Context
1. Make as many QNN EP UTs pass as possible to improve the test
   coverage.
  • Loading branch information
kuanyul-quic authored and Kuan-Yu Lin committed Feb 24, 2025
1 parent e46c0d8 commit 518d09e
Show file tree
Hide file tree
Showing 3 changed files with 17 additions and 11 deletions.
20 changes: 12 additions & 8 deletions onnxruntime/test/providers/qnn/conv_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1519,9 +1519,10 @@ TEST_F(QnnHTPBackendTests, Conv3D_U16S8S32_PerChannel2) {
// Expected val: 87.354057312011719
// QNN QDQ val: 0 (err 87.354057312011719)
// CPU QDQ val: 87.3583984375 (err 0.00434112548828125)
TEST_F(QnnHTPBackendTests, DISABLED_ConvU16S16S32_DynamicBias) {
// Issue fixed in 2.30
TEST_F(QnnHTPBackendTests, ConvU16S16S32_DynamicBias) {
TestInputDef<float> input_def({1, 2, 5, 5}, false, GetFloatDataInRange(-10.0f, 10.0f, 50));
TestInputDef<float> weight_def({1, 2, 3, 3}, false, GetFloatDataInRange(-1.0f, 5.0f, 18));
TestInputDef<float> weight_def({1, 2, 3, 3}, true, GetFloatDataInRange(-1.0f, 5.0f, 18));
RunHTPConvOpTest<uint16_t, int16_t>("Conv",
input_def, // Input
weight_def.OverrideValueRange(-5.0f, 5.0f), // Weights (symmetric quant range)
Expand All @@ -1537,9 +1538,10 @@ TEST_F(QnnHTPBackendTests, DISABLED_ConvU16S16S32_DynamicBias) {

// Tests 16-bit QDQ Conv with dynamic weights and bias (uses QNN's DepthwiseConv2d)
// TODO(adrianlizarraga): FAIL: Failed to finalize QNN graph. Error code 1002
TEST_F(QnnHTPBackendTests, DISABLED_DepthwiseConvU16S16S32_DynamicBias) {
// Issue fixed in 2.30
TEST_F(QnnHTPBackendTests, DepthwiseConvU16S16S32_DynamicBias) {
TestInputDef<float> input_def({1, 1, 5, 5}, false, GetFloatDataInRange(-10.0f, 10.0f, 25));
TestInputDef<float> weight_def({1, 1, 3, 3}, false, GetFloatDataInRange(-1.0f, 5.0f, 9));
TestInputDef<float> weight_def({1, 1, 3, 3}, true, GetFloatDataInRange(-1.0f, 5.0f, 9));
RunHTPConvOpTest<uint16_t, int16_t>("Conv",
input_def, // Input
weight_def.OverrideValueRange(-5.0f, 5.0f), // Weights (symmetric quant range)
Expand All @@ -1559,9 +1561,10 @@ TEST_F(QnnHTPBackendTests, DISABLED_DepthwiseConvU16S16S32_DynamicBias) {
// Expected val: 85.354057312011719
// QNN QDQ val: 0 (err 85.354057312011719)
// CPU QDQ val: 85.358139038085938 (err 0.00408172607421875)
TEST_F(QnnHTPBackendTests, DISABLED_ConvU16S16S32_NoBias) {
// Issue fixed in 2.30
TEST_F(QnnHTPBackendTests, ConvU16S16S32_NoBias) {
TestInputDef<float> input_def({1, 2, 5, 5}, false, GetFloatDataInRange(-10.0f, 10.0f, 50));
TestInputDef<float> weight_def({1, 2, 3, 3}, false, GetFloatDataInRange(-1.0f, 5.0f, 18));
TestInputDef<float> weight_def({1, 2, 3, 3}, true, GetFloatDataInRange(-1.0f, 5.0f, 18));
RunHTPConvOpTest<uint16_t, int16_t>("Conv",
input_def, // Input
weight_def.OverrideValueRange(-5.0f, 5.0f), // Weights (symmetric quant range)
Expand All @@ -1577,12 +1580,13 @@ TEST_F(QnnHTPBackendTests, DISABLED_ConvU16S16S32_NoBias) {

// Tests 16-bit QDQ Conv with dynamic weights and no bias (uses QNN's DepthWiseConv2d)
// TODO(adrianlizarraga): FAIL: Failed to finalize QNN graph. Error code 1002
TEST_F(QnnHTPBackendTests, DISABLED_DepthwiseConvU16S16S32_NoBias) {
// Issue fixed in 2.30
TEST_F(QnnHTPBackendTests, DepthwiseConvU16S16S32_NoBias) {
std::vector<float> input_data = GetFloatDataInRange(-10.0f, 10.0f, 25);
std::vector<float> weight_data = GetFloatDataInRange(-10.0f, 10.0f, 9);
RunHTPConvOpTest<uint16_t, int16_t>("Conv",
TestInputDef<float>({1, 1, 5, 5}, false, input_data), // Input
TestInputDef<float>({1, 1, 3, 3}, false, weight_data), // Weights
TestInputDef<float>({1, 1, 3, 3}, true, weight_data), // Weights
TestInputDef<float>(), // Bias
{1, 1}, // Strides
{0, 0, 0, 0}, // Pads
Expand Down
5 changes: 3 additions & 2 deletions onnxruntime/test/providers/qnn/resize_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -456,11 +456,12 @@ TEST_F(QnnHTPBackendTests, ResizeU8_2xNearestAsymmetricRoundPreferFloor_Unsuppor
// ORT CPU EP (f32 model) outputs: -10.0000000 -10.0000000 -3.33333349 -3.33333349 -3.33333349 -3.33333349 -10.00 ...
// ORT CPU EP (qdq model) outputs: -9.96078491 -9.96078491 -3.29411769 -3.29411769 -3.29411769 -3.29411769 -9.961 ...
// ORT QNN EP (qdq model) outputs: -9.96078491 -9.96078491 -9.96078491 -3.37254906 -3.37254906 -3.37254906 -9.961 ...
TEST_F(QnnHTPBackendTests, DISABLED_ResizeU8_3xNearestAsymmetricRoundPreferFloor) {
// UPDATE: "round_prefer_floor" no longer supported in QNN SDK 2.21 (supported in QNN SDK 2.19)
TEST_F(QnnHTPBackendTests, ResizeU8_3xNearestAsymmetricRoundPreferFloor_Unsupported) {
std::vector<float> input_data = GetFloatDataInRange(-10.0f, 10.0f, 4);
RunQDQResizeOpTest<uint8_t>(TestInputDef<float>({1, 1, 2, 2}, false, input_data),
{1, 1, 6, 6}, "nearest", "asymmetric", "round_prefer_floor",
ExpectedEPNodeAssignment::All);
ExpectedEPNodeAssignment::None); // No longer supported as of QNN SDK 2.21
}

// Test 0.5x QDQ Resize mode: "nearest", coordinate_transformation_mode: "asymmetric", nearest_mode: "floor"
Expand Down
3 changes: 2 additions & 1 deletion onnxruntime/test/providers/qnn/simple_op_htp_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -297,7 +297,8 @@ TEST_F(QnnHTPBackendTests, UnaryOp_Elu) {
// Expected val: -0.99751651287078857
// QNN QDQ val: 6.2726154327392578 (err 7.2701320648193359)
// CPU QDQ val: -0.99753034114837646 (err 1.3828277587890625e-05)
TEST_F(QnnHTPBackendTests, DISABLE_UnaryOp_Elu_U16) {
// Issue fixed in 2.30
TEST_F(QnnHTPBackendTests, UnaryOp_Elu_U16) {
RunQDQOpTest<uint16_t>("Elu",
{TestInputDef<float>({1, 2, 3}, false, GetFloatDataInRange(-10.0f, 10.0f, 6))},
{},
Expand Down

0 comments on commit 518d09e

Please sign in to comment.