Error when optimizing the model and emitting C++ code #55

Open
dinghezier opened this issue Oct 3, 2022 · 0 comments

dinghezier commented Oct 3, 2022

I have generated a 'resnet18.mlir' file whose output type is 'TOSA'. When I run the command to optimize the model and emit C++ code, I get an error. How can I solve this problem?
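For reference, this is the overall flow split into separate steps, which makes it easier to see which tool fails (a sketch only; the combined command I actually ran and the exact error are shown further down, and I am assuming scalehls-opt accepts the standard MLIR options used here):

# Parse and verify the input with no passes at all
scalehls-opt resnet18.mlir -o /dev/null

# Run only the optimization pipeline and keep its output
scalehls-opt resnet18.mlir \
    -scalehls-pytorch-pipeline-v2="top-func=forward loop-tile-size=4 loop-unroll-factor=2" \
    > resnet18-opt.mlir

# Emit C++ only if the previous step succeeds
scalehls-translate -emit-hlscpp resnet18-opt.mlir > resnet18.cpp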

This is part of the 'resnet18.mlir' file that I'm using:

    %21 = "tosa.const"() {value = dense<0.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
    %22 = "tosa.const"() {value = dense<[0, 2, 3, 1]> : tensor<4xi32>} : () -> tensor<4xi32>
    %23 = "tosa.const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
    %24 = "tosa.const"() {value = dense<0.000000e+00> : tensor<128xf32>} : () -> tensor<128xf32>
    %25 = "tosa.const"() {value = dense<0.000000e+00> : tensor<256xf32>} : () -> tensor<256xf32>
    %26 = "tosa.const"() {value = dense<0.000000e+00> : tensor<512xf32>} : () -> tensor<512xf32>
    %27 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
    %28 = "tosa.const"() {value = dense<[[-0.0172594767, 0.00439959345, -0.0343063548, -0.03396843, -0.0193317942, -0.0121321231, 0.0357438102, 0.0399024114, 0.022784112, -0.0243642293]]> : tensor<1x10xf32>} : () -> tensor<1x10xf32>
    %29 = "tosa.transpose"(%arg0, %22) : (tensor<1x3x32x32xf32>, tensor<4xi32>) -> tensor<1x32x32x3xf32>
    %30 = "tosa.transpose"(%20, %22) : (tensor<64x3x3x3xf32>, tensor<4xi32>) -> tensor<64x3x3x3xf32>
    %31 = "tosa.conv2d"(%29, %30, %21) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [1, 1]} : (tensor<1x32x32x3xf32>, tensor<64x3x3x3xf32>, tensor<64xf32>) -> tensor<1x32x32x64xf32>
    %32 = "tosa.transpose"(%31, %23) : (tensor<1x32x32x64xf32>, tensor<4xi32>) -> tensor<1x64x32x32xf32>
    %33 = "tosa.clamp"(%32) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x64x32x32xf32>) -> tensor<1x64x32x32xf32>
    %34 = "tosa.transpose"(%33, %22) : (tensor<1x64x32x32xf32>, tensor<4xi32>) -> tensor<1x32x32x64xf32>
    %35 = "tosa.transpose"(%19, %22) : (tensor<64x64x3x3xf32>, tensor<4xi32>) -> tensor<64x3x3x64xf32>
    %36 = "tosa.conv2d"(%34, %35, %21) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [1, 1]} : (tensor<1x32x32x64xf32>, tensor<64x3x3x64xf32>, tensor<64xf32>) -> tensor<1x32x32x64xf32>
    %37 = "tosa.transpose"(%36, %23) : (tensor<1x32x32x64xf32>, tensor<4xi32>) -> tensor<1x64x32x32xf32>
    %38 = "tosa.clamp"(%37) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x64x32x32xf32>) -> tensor<1x64x32x32xf32>
    %39 = "tosa.transpose"(%38, %22) : (tensor<1x64x32x32xf32>, tensor<4xi32>) -> tensor<1x32x32x64xf32>
    %40 = "tosa.transpose"(%18, %22) : (tensor<64x64x3x3xf32>, tensor<4xi32>) -> tensor<64x3x3x64xf32>
    %41 = "tosa.conv2d"(%39, %40, %21) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [1, 1]} : (tensor<1x32x32x64xf32>, tensor<64x3x3x64xf32>, tensor<64xf32>) -> tensor<1x32x32x64xf32>
    %42 = "tosa.transpose"(%41, %23) : (tensor<1x32x32x64xf32>, tensor<4xi32>) -> tensor<1x64x32x32xf32>
    %43 = "tosa.add"(%42, %33) : (tensor<1x64x32x32xf32>, tensor<1x64x32x32xf32>) -> tensor<1x64x32x32xf32>
    %44 = "tosa.clamp"(%43) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x64x32x32xf32>) -> tensor<1x64x32x32xf32>
    %45 = "tosa.transpose"(%44, %22) : (tensor<1x64x32x32xf32>, tensor<4xi32>) -> tensor<1x32x32x64xf32>
    %46 = "tosa.transpose"(%17, %22) : (tensor<64x64x3x3xf32>, tensor<4xi32>) -> tensor<64x3x3x64xf32>
    %47 = "tosa.conv2d"(%45, %46, %21) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [1, 1]} : (tensor<1x32x32x64xf32>, tensor<64x3x3x64xf32>, tensor<64xf32>) -> tensor<1x32x32x64xf32>
    %48 = "tosa.transpose"(%47, %23) : (tensor<1x32x32x64xf32>, tensor<4xi32>) -> tensor<1x64x32x32xf32>
    %49 = "tosa.clamp"(%48) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x64x32x32xf32>) -> tensor<1x64x32x32xf32>
    %50 = "tosa.transpose"(%49, %22) : (tensor<1x64x32x32xf32>, tensor<4xi32>) -> tensor<1x32x32x64xf32>
    %51 = "tosa.transpose"(%16, %22) : (tensor<64x64x3x3xf32>, tensor<4xi32>) -> tensor<64x3x3x64xf32>
    %52 = "tosa.conv2d"(%50, %51, %21) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [1, 1]} : (tensor<1x32x32x64xf32>, tensor<64x3x3x64xf32>, tensor<64xf32>) -> tensor<1x32x32x64xf32>
    %53 = "tosa.transpose"(%52, %23) : (tensor<1x32x32x64xf32>, tensor<4xi32>) -> tensor<1x64x32x32xf32>
    %54 = "tosa.add"(%53, %44) : (tensor<1x64x32x32xf32>, tensor<1x64x32x32xf32>) -> tensor<1x64x32x32xf32>
    %55 = "tosa.clamp"(%54) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x64x32x32xf32>) -> tensor<1x64x32x32xf32>
    %56 = "tosa.transpose"(%55, %22) : (tensor<1x64x32x32xf32>, tensor<4xi32>) -> tensor<1x32x32x64xf32>
    %57 = "tosa.transpose"(%15, %22) : (tensor<128x64x3x3xf32>, tensor<4xi32>) -> tensor<128x3x3x64xf32>
    %58 = "tosa.conv2d"(%56, %57, %24) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [2, 2]} : (tensor<1x32x32x64xf32>, tensor<128x3x3x64xf32>, tensor<128xf32>) -> tensor<1x16x16x128xf32>
    %59 = "tosa.transpose"(%58, %23) : (tensor<1x16x16x128xf32>, tensor<4xi32>) -> tensor<1x128x16x16xf32>
    %60 = "tosa.clamp"(%59) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x128x16x16xf32>) -> tensor<1x128x16x16xf32>
    %61 = "tosa.transpose"(%60, %22) : (tensor<1x128x16x16xf32>, tensor<4xi32>) -> tensor<1x16x16x128xf32>
    %62 = "tosa.transpose"(%14, %22) : (tensor<128x128x3x3xf32>, tensor<4xi32>) -> tensor<128x3x3x128xf32>
    %63 = "tosa.conv2d"(%61, %62, %24) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [1, 1]} : (tensor<1x16x16x128xf32>, tensor<128x3x3x128xf32>, tensor<128xf32>) -> tensor<1x16x16x128xf32>
    %64 = "tosa.transpose"(%63, %23) : (tensor<1x16x16x128xf32>, tensor<4xi32>) -> tensor<1x128x16x16xf32>
    %65 = "tosa.transpose"(%13, %22) : (tensor<128x64x1x1xf32>, tensor<4xi32>) -> tensor<128x1x1x64xf32>
    %66 = "tosa.conv2d"(%56, %65, %24) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [2, 2]} : (tensor<1x32x32x64xf32>, tensor<128x1x1x64xf32>, tensor<128xf32>) -> tensor<1x16x16x128xf32>
    %67 = "tosa.transpose"(%66, %23) : (tensor<1x16x16x128xf32>, tensor<4xi32>) -> tensor<1x128x16x16xf32>
    %68 = "tosa.add"(%64, %67) : (tensor<1x128x16x16xf32>, tensor<1x128x16x16xf32>) -> tensor<1x128x16x16xf32>
    %69 = "tosa.clamp"(%68) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x128x16x16xf32>) -> tensor<1x128x16x16xf32>
    %70 = "tosa.transpose"(%69, %22) : (tensor<1x128x16x16xf32>, tensor<4xi32>) -> tensor<1x16x16x128xf32>
    %71 = "tosa.transpose"(%12, %22) : (tensor<128x128x3x3xf32>, tensor<4xi32>) -> tensor<128x3x3x128xf32>
    %72 = "tosa.conv2d"(%70, %71, %24) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [1, 1]} : (tensor<1x16x16x128xf32>, tensor<128x3x3x128xf32>, tensor<128xf32>) -> tensor<1x16x16x128xf32>
    %73 = "tosa.transpose"(%72, %23) : (tensor<1x16x16x128xf32>, tensor<4xi32>) -> tensor<1x128x16x16xf32>
    %74 = "tosa.clamp"(%73) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x128x16x16xf32>) -> tensor<1x128x16x16xf32>
    %75 = "tosa.transpose"(%74, %22) : (tensor<1x128x16x16xf32>, tensor<4xi32>) -> tensor<1x16x16x128xf32>
    %76 = "tosa.transpose"(%11, %22) : (tensor<128x128x3x3xf32>, tensor<4xi32>) -> tensor<128x3x3x128xf32>
    %77 = "tosa.conv2d"(%75, %76, %24) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [1, 1]} : (tensor<1x16x16x128xf32>, tensor<128x3x3x128xf32>, tensor<128xf32>) -> tensor<1x16x16x128xf32>
    %78 = "tosa.transpose"(%77, %23) : (tensor<1x16x16x128xf32>, tensor<4xi32>) -> tensor<1x128x16x16xf32>
    %79 = "tosa.add"(%78, %69) : (tensor<1x128x16x16xf32>, tensor<1x128x16x16xf32>) -> tensor<1x128x16x16xf32>
    %80 = "tosa.clamp"(%79) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x128x16x16xf32>) -> tensor<1x128x16x16xf32>
    %81 = "tosa.transpose"(%80, %22) : (tensor<1x128x16x16xf32>, tensor<4xi32>) -> tensor<1x16x16x128xf32>
    %82 = "tosa.transpose"(%10, %22) : (tensor<256x128x3x3xf32>, tensor<4xi32>) -> tensor<256x3x3x128xf32>
    %83 = "tosa.conv2d"(%81, %82, %25) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [2, 2]} : (tensor<1x16x16x128xf32>, tensor<256x3x3x128xf32>, tensor<256xf32>) -> tensor<1x8x8x256xf32>
    %84 = "tosa.transpose"(%83, %23) : (tensor<1x8x8x256xf32>, tensor<4xi32>) -> tensor<1x256x8x8xf32>
    %85 = "tosa.clamp"(%84) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x256x8x8xf32>) -> tensor<1x256x8x8xf32>
    %86 = "tosa.transpose"(%85, %22) : (tensor<1x256x8x8xf32>, tensor<4xi32>) -> tensor<1x8x8x256xf32>
    %87 = "tosa.transpose"(%9, %22) : (tensor<256x256x3x3xf32>, tensor<4xi32>) -> tensor<256x3x3x256xf32>
    %88 = "tosa.conv2d"(%86, %87, %25) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [1, 1]} : (tensor<1x8x8x256xf32>, tensor<256x3x3x256xf32>, tensor<256xf32>) -> tensor<1x8x8x256xf32>
    %89 = "tosa.transpose"(%88, %23) : (tensor<1x8x8x256xf32>, tensor<4xi32>) -> tensor<1x256x8x8xf32>
    %90 = "tosa.transpose"(%8, %22) : (tensor<256x128x1x1xf32>, tensor<4xi32>) -> tensor<256x1x1x128xf32>
    %91 = "tosa.conv2d"(%81, %90, %25) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [2, 2]} : (tensor<1x16x16x128xf32>, tensor<256x1x1x128xf32>, tensor<256xf32>) -> tensor<1x8x8x256xf32>
    %92 = "tosa.transpose"(%91, %23) : (tensor<1x8x8x256xf32>, tensor<4xi32>) -> tensor<1x256x8x8xf32>
    %93 = "tosa.add"(%89, %92) : (tensor<1x256x8x8xf32>, tensor<1x256x8x8xf32>) -> tensor<1x256x8x8xf32>
    %94 = "tosa.clamp"(%93) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x256x8x8xf32>) -> tensor<1x256x8x8xf32>
    %95 = "tosa.transpose"(%94, %22) : (tensor<1x256x8x8xf32>, tensor<4xi32>) -> tensor<1x8x8x256xf32>
    %96 = "tosa.transpose"(%7, %22) : (tensor<256x256x3x3xf32>, tensor<4xi32>) -> tensor<256x3x3x256xf32>
    %97 = "tosa.conv2d"(%95, %96, %25) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [1, 1]} : (tensor<1x8x8x256xf32>, tensor<256x3x3x256xf32>, tensor<256xf32>) -> tensor<1x8x8x256xf32>
    %98 = "tosa.transpose"(%97, %23) : (tensor<1x8x8x256xf32>, tensor<4xi32>) -> tensor<1x256x8x8xf32>
    %99 = "tosa.clamp"(%98) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x256x8x8xf32>) -> tensor<1x256x8x8xf32>
    %100 = "tosa.transpose"(%99, %22) : (tensor<1x256x8x8xf32>, tensor<4xi32>) -> tensor<1x8x8x256xf32>
    %101 = "tosa.transpose"(%6, %22) : (tensor<256x256x3x3xf32>, tensor<4xi32>) -> tensor<256x3x3x256xf32>
    %102 = "tosa.conv2d"(%100, %101, %25) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [1, 1]} : (tensor<1x8x8x256xf32>, tensor<256x3x3x256xf32>, tensor<256xf32>) -> tensor<1x8x8x256xf32>
    %103 = "tosa.transpose"(%102, %23) : (tensor<1x8x8x256xf32>, tensor<4xi32>) -> tensor<1x256x8x8xf32>
    %104 = "tosa.add"(%103, %94) : (tensor<1x256x8x8xf32>, tensor<1x256x8x8xf32>) -> tensor<1x256x8x8xf32>
    %105 = "tosa.clamp"(%104) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x256x8x8xf32>) -> tensor<1x256x8x8xf32>
    %106 = "tosa.transpose"(%105, %22) : (tensor<1x256x8x8xf32>, tensor<4xi32>) -> tensor<1x8x8x256xf32>
    %107 = "tosa.transpose"(%5, %22) : (tensor<512x256x3x3xf32>, tensor<4xi32>) -> tensor<512x3x3x256xf32>
    %108 = "tosa.conv2d"(%106, %107, %26) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [2, 2]} : (tensor<1x8x8x256xf32>, tensor<512x3x3x256xf32>, tensor<512xf32>) -> tensor<1x4x4x512xf32>
    %109 = "tosa.transpose"(%108, %23) : (tensor<1x4x4x512xf32>, tensor<4xi32>) -> tensor<1x512x4x4xf32>
    %110 = "tosa.clamp"(%109) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x512x4x4xf32>) -> tensor<1x512x4x4xf32>
    %111 = "tosa.transpose"(%110, %22) : (tensor<1x512x4x4xf32>, tensor<4xi32>) -> tensor<1x4x4x512xf32>
    %112 = "tosa.transpose"(%4, %22) : (tensor<512x512x3x3xf32>, tensor<4xi32>) -> tensor<512x3x3x512xf32>
    %113 = "tosa.conv2d"(%111, %112, %26) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [1, 1]} : (tensor<1x4x4x512xf32>, tensor<512x3x3x512xf32>, tensor<512xf32>) -> tensor<1x4x4x512xf32>
    %114 = "tosa.transpose"(%113, %23) : (tensor<1x4x4x512xf32>, tensor<4xi32>) -> tensor<1x512x4x4xf32>
    %115 = "tosa.transpose"(%3, %22) : (tensor<512x256x1x1xf32>, tensor<4xi32>) -> tensor<512x1x1x256xf32>
    %116 = "tosa.conv2d"(%106, %115, %26) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [2, 2]} : (tensor<1x8x8x256xf32>, tensor<512x1x1x256xf32>, tensor<512xf32>) -> tensor<1x4x4x512xf32>
    %117 = "tosa.transpose"(%116, %23) : (tensor<1x4x4x512xf32>, tensor<4xi32>) -> tensor<1x512x4x4xf32>
    %118 = "tosa.add"(%114, %117) : (tensor<1x512x4x4xf32>, tensor<1x512x4x4xf32>) -> tensor<1x512x4x4xf32>
    %119 = "tosa.clamp"(%118) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x512x4x4xf32>) -> tensor<1x512x4x4xf32>
    %120 = "tosa.transpose"(%119, %22) : (tensor<1x512x4x4xf32>, tensor<4xi32>) -> tensor<1x4x4x512xf32>
    %121 = "tosa.transpose"(%2, %22) : (tensor<512x512x3x3xf32>, tensor<4xi32>) -> tensor<512x3x3x512xf32>
    %122 = "tosa.conv2d"(%120, %121, %26) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [1, 1]} : (tensor<1x4x4x512xf32>, tensor<512x3x3x512xf32>, tensor<512xf32>) -> tensor<1x4x4x512xf32>
    %123 = "tosa.transpose"(%122, %23) : (tensor<1x4x4x512xf32>, tensor<4xi32>) -> tensor<1x512x4x4xf32>
    %124 = "tosa.clamp"(%123) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x512x4x4xf32>) -> tensor<1x512x4x4xf32>
    %125 = "tosa.transpose"(%124, %22) : (tensor<1x512x4x4xf32>, tensor<4xi32>) -> tensor<1x4x4x512xf32>
    %126 = "tosa.transpose"(%1, %22) : (tensor<512x512x3x3xf32>, tensor<4xi32>) -> tensor<512x3x3x512xf32>
    %127 = "tosa.conv2d"(%125, %126, %26) {dilation = [1, 1], pad = [1, 1, 1, 1], stride = [1, 1]} : (tensor<1x4x4x512xf32>, tensor<512x3x3x512xf32>, tensor<512xf32>) -> tensor<1x4x4x512xf32>
    %128 = "tosa.transpose"(%127, %23) : (tensor<1x4x4x512xf32>, tensor<4xi32>) -> tensor<1x512x4x4xf32>
    %129 = "tosa.add"(%128, %119) : (tensor<1x512x4x4xf32>, tensor<1x512x4x4xf32>) -> tensor<1x512x4x4xf32>
    %130 = "tosa.clamp"(%129) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x512x4x4xf32>) -> tensor<1x512x4x4xf32>
    %131 = "tosa.transpose"(%130, %22) : (tensor<1x512x4x4xf32>, tensor<4xi32>) -> tensor<1x4x4x512xf32>
    %132 = "tosa.avg_pool2d"(%131) {kernel = [4, 4], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<1x4x4x512xf32>) -> tensor<1x1x1x512xf32>
    %133 = "tosa.transpose"(%132, %23) : (tensor<1x1x1x512xf32>, tensor<4xi32>) -> tensor<1x512x1x1xf32>
    %134 = "tosa.transpose"(%0, %27) : (tensor<10x512xf32>, tensor<2xi32>) -> tensor<512x10xf32>
    %135 = "tosa.reshape"(%133) {new_shape = [1, 1, 512]} : (tensor<1x512x1x1xf32>) -> tensor<1x1x512xf32>
    %136 = "tosa.reshape"(%134) {new_shape = [1, 512, 10]} : (tensor<512x10xf32>) -> tensor<1x512x10xf32>
    %137 = "tosa.matmul"(%135, %136) : (tensor<1x1x512xf32>, tensor<1x512x10xf32>) -> tensor<1x1x10xf32>
    %138 = "tosa.reshape"(%137) {new_shape = [1, 10]} : (tensor<1x1x10xf32>) -> tensor<1x10xf32>
    %139 = "tosa.add"(%138, %28) : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
    return %139 : tensor<1x10xf32>

This is the command I run, and the error it produces:

scalehls-opt resnet18.mlir     -scalehls-pytorch-pipeline-v2="top-func=forward loop-tile-size=4 loop-unroll-factor=2"     | scalehls-translate -emit-hlscpp > resnet18.cpp

resnet18.mlir:139:12: error: 'linalg.generic' op expected the shape-to-loops map to be non-null
    %136 = "tosa.reshape"(%134) {new_shape = [1, 512, 10]} : (tensor<512x10xf32>) -> tensor<1x512x10xf32>
           ^
resnet18.mlir:139:12: note: see current operation: %68 = "linalg.generic"(%62#1, %67) ({
^bb0(%arg1: f32, %arg2: f32):
  "linalg.yield"(%arg1) : (f32) -> ()
}) {indexing_maps = [affine_map<(d0) -> ((d0 floordiv 10) mod 512, d0 mod 10)>, affine_map<(d0) -> (0, (d0 floordiv 10) mod 512, d0 mod 10)>], iterator_types = ["parallel"], operand_segment_sizes = dense<1> : vector<2xi32>} : (tensor<512x10xf32>, tensor<1x512x10xf32>) -> tensor<1x512x10xf32>
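If it helps to narrow things down, the diagnostic points at the rank-expanding tosa.reshape (tensor<512x10xf32> to tensor<1x512x10xf32>) that feeds the final matmul. A reduced test case containing only that op might reproduce the failure on its own (a sketch; I have not verified that it fails the same way):

# Write a minimal module with just the reshape the error points at
cat > repro.mlir << 'EOF'
func.func @forward(%arg0: tensor<512x10xf32>) -> tensor<1x512x10xf32> {
  %0 = "tosa.reshape"(%arg0) {new_shape = [1, 512, 10]} : (tensor<512x10xf32>) -> tensor<1x512x10xf32>
  return %0 : tensor<1x512x10xf32>
}
EOF

# Run it through the same pipeline that fails on the full model
scalehls-opt repro.mlir \
    -scalehls-pytorch-pipeline-v2="top-func=forward loop-tile-size=4 loop-unroll-factor=2"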