diff --git a/factory-ai-vision/EdgeSolution/deployment.cpu.arm64v8.template.json b/factory-ai-vision/EdgeSolution/deployment.cpu.arm64v8.template.json index a40eb6bf4..8646910dd 100644 --- a/factory-ai-vision/EdgeSolution/deployment.cpu.arm64v8.template.json +++ b/factory-ai-vision/EdgeSolution/deployment.cpu.arm64v8.template.json @@ -67,9 +67,9 @@ "createOptions": { "HostConfig": { "PortBindings": { - "8080/tcp": [ + "8181/tcp": [ { - "HostPort": "8080" + "HostPort": "8181" } ] } @@ -89,41 +89,26 @@ "ENDPOINT": { "value": "$CUSTOM_VISION_ENDPOINT" }, - "DBNAME": { "value": "vision_on_edge" }, - "DBHOST": { "value": "172.18.0.1" }, - "DBUSER": { "value": "vision_on_edge" }, - "DBPASS": { "value": "vision_on_edge" }, - "DF_INFERENECE_IS_GPU": { "value": false } + "DBNAME": { + "value": "vision_on_edge" + }, + "DBHOST": { + "value": "172.18.0.1" + }, + "DBUSER": { + "value": "vision_on_edge" + }, + "DBPASS": { + "value": "vision_on_edge" + }, + "DF_INFERENECE_IS_GPU": { + "value": false + } }, "settings": { "image": "${MODULES.WebModule.arm64v8-ubuntu}" } }, - "WebDBModule": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "env": { - "POSTGRES_PASSWORD": { "value": "vision_on_edge" }, - "POSTGRES_USER": { "value": "vision_on_edge" }, - "POSTGRES_DB": { "value": "vision_on_edge" } - }, - "settings": { - "image": "${MODULES.WebDBModule.arm64v8}", - "createOptions": { - "HostConfig": { - "PortBindings": { - "5432/tcp": [ - { - "HostPort": "5432" - } - ] - } - } - } - } - }, "InferenceModule": { "version": "1.0", "type": "docker", @@ -157,6 +142,54 @@ } } } + }, + "lvaEdge": { + "version": "1.0", + "type": "docker", + "status": "running", + "restartPolicy": "always", + "settings": { + "image": "mcr.microsoft.com/media/live-video-analytics:1.0.4", + "createOptions": { + "HostConfig": { + "LogConfig": { + "Type": "", + "Config": { + "max-size": "10m", + "max-file": "10" + } + }, + "IpcMode": "host" + } + } + } + }, + 
"rtspsim": { + "version": "1.0", + "type": "docker", + "status": "running", + "restartPolicy": "always", + "settings": { + "image": "${MODULES.RtspSimModule}", + "createOptions": { + "HostConfig": { + "LogConfig": { + "Type": "", + "Config": { + "max-size": "10m", + "max-file": "10" + } + }, + "PortBindings": { + "554/tcp": [ + { + "HostPort": "554" + } + ] + } + } + } + } } } } @@ -165,7 +198,8 @@ "properties.desired": { "schemaVersion": "1.0", "routes": { - "metrics": "FROM /messages/modules/InferenceModule/outputs/metrics INTO $upstream" + "metrics": "FROM /messages/modules/InferenceModule/outputs/metrics INTO $upstream", + "InferenceToLVA": "FROM /messages/modules/InferenceModule/outputs/InferenceToLVA INTO BrokeredEndpoint(\"/modules/lvaEdge/inputs/recordingTrigger\")" }, "storeAndForwardConfiguration": { "timeToLiveSecs": 7200 @@ -177,6 +211,24 @@ }, "VisionSampleModule": { "properties.desired": {} + }, + "lvaEdge": { + "properties.desired": { + "applicationDataDirectory": "/var/media", + "azureMediaServicesArmId": "/subscriptions/$SUBSCRIPTION_ID/resourcegroups/$RESOURCE_GROUP/providers/microsoft.media/mediaservices/$SERVICE_NAME", + "aadTenantId": "$TENANT_ID", + "aadServicePrincipalAppId": "$SERVICE_PRINCIPAL_APP_ID", + "aadServicePrincipalSecret": "$SERVICE_PRINCIPAL_SECRET", + "aadEndpoint": "https://login.microsoftonline.com", + "aadResourceId": "https://management.core.windows.net/", + "armEndpoint": "https://management.azure.com/", + "diagnosticsEventsOutputName": "AmsDiagnostics", + "operationalEventsOutputName": "AmsOperational", + "logLevel": "Information", + "logCategories": "Application,Events", + "allowUnsecuredEndpoints": true, + "telemetryOptOut": false + } } } } diff --git a/factory-ai-vision/EdgeSolution/deployment.cpu.opencv.backend_only.template.json b/factory-ai-vision/EdgeSolution/deployment.cpu.opencv.backend_only.template.json index 83c7dff16..d2039c2e9 100644 --- 
a/factory-ai-vision/EdgeSolution/deployment.cpu.opencv.backend_only.template.json +++ b/factory-ai-vision/EdgeSolution/deployment.cpu.opencv.backend_only.template.json @@ -69,7 +69,7 @@ "PortBindings": { "8181/tcp": [ { - "HostPort": "8080" + "HostPort": "8181" } ] } diff --git a/factory-ai-vision/EdgeSolution/deployment.cpu.opencv.template.json b/factory-ai-vision/EdgeSolution/deployment.cpu.opencv.template.json index 071fbf8e1..2122ce9d4 100644 --- a/factory-ai-vision/EdgeSolution/deployment.cpu.opencv.template.json +++ b/factory-ai-vision/EdgeSolution/deployment.cpu.opencv.template.json @@ -137,26 +137,6 @@ } } }, - "WebDBModule": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "env": { - "POSTGRES_PASSWORD": { - "value": "vision_on_edge" - }, - "POSTGRES_USER": { - "value": "vision_on_edge" - }, - "POSTGRES_DB": { - "value": "vision_on_edge" - } - }, - "settings": { - "image": "${MODULES.WebDBModule}" - } - }, "InferenceModule": { "version": "1.0", "type": "docker", diff --git a/factory-ai-vision/EdgeSolution/deployment.cpu.template.json b/factory-ai-vision/EdgeSolution/deployment.cpu.template.json index 7f6ec4961..2ae32b909 100644 --- a/factory-ai-vision/EdgeSolution/deployment.cpu.template.json +++ b/factory-ai-vision/EdgeSolution/deployment.cpu.template.json @@ -108,7 +108,9 @@ "DBPASS": { "value": "vision_on_edge" }, - "DF_INFERENECE_IS_GPU": { "value": false } + "DF_INFERENECE_IS_GPU": { + "value": false + } }, "settings": { "image": "${MODULES.WebModule}", @@ -132,44 +134,6 @@ } } }, - "WebDBModule": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "env": { - "POSTGRES_PASSWORD": { - "value": "vision_on_edge" - }, - "POSTGRES_USER": { - "value": "vision_on_edge" - }, - "POSTGRES_DB": { - "value": "vision_on_edge" - } - }, - "settings": { - "image": "${MODULES.WebDBModule}", - "createOptions": { - "HostConfig": { - "LogConfig": { - "Type": "", - "Config": { - 
"max-size": "10m", - "max-file": "10" - } - }, - "PortBindings": { - "5432/tcp": [ - { - "HostPort": "5432" - } - ] - } - } - } - } - }, "InferenceModule": { "version": "1.0", "type": "docker", @@ -247,7 +211,7 @@ "settings": { "image": "${MODULES.RtspSimModule}", "createOptions": { - "HostConfig":{ + "HostConfig": { "LogConfig": { "Type": "", "Config": { @@ -255,10 +219,12 @@ "max-file": "10" } }, - "PortBindings":{ - "554/tcp":[{ - "HostPort":"554" - }] + "PortBindings": { + "554/tcp": [ + { + "HostPort": "554" + } + ] } } } diff --git a/factory-ai-vision/EdgeSolution/deployment.gpu.arm64v8.template.json b/factory-ai-vision/EdgeSolution/deployment.gpu.arm64v8.template.json index 7238b6e2f..993e8aba1 100644 --- a/factory-ai-vision/EdgeSolution/deployment.gpu.arm64v8.template.json +++ b/factory-ai-vision/EdgeSolution/deployment.gpu.arm64v8.template.json @@ -67,9 +67,9 @@ "createOptions": { "HostConfig": { "PortBindings": { - "8080/tcp": [ + "8181/tcp": [ { - "HostPort": "8080" + "HostPort": "8181" } ] } @@ -89,30 +89,26 @@ "ENDPOINT": { "value": "$CUSTOM_VISION_ENDPOINT" }, - "DBNAME": { "value": "vision_on_edge" }, - "DBHOST": { "value": "172.18.0.1" }, - "DBUSER": { "value": "vision_on_edge" }, - "DBPASS": { "value": "vision_on_edge" }, - "DF_INFERENECE_IS_GPU": { "value": true } + "DBNAME": { + "value": "vision_on_edge" + }, + "DBHOST": { + "value": "172.18.0.1" + }, + "DBUSER": { + "value": "vision_on_edge" + }, + "DBPASS": { + "value": "vision_on_edge" + }, + "DF_INFERENECE_IS_GPU": { + "value": true + } }, "settings": { "image": "${MODULES.WebModule.arm64v8-ubuntu}" } }, - "WebDBModule": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "env": { - "POSTGRES_PASSWORD": { "value": "vision_on_edge" }, - "POSTGRES_USER": { "value": "vision_on_edge" }, - "POSTGRES_DB": { "value": "vision_on_edge" } - }, - "settings": { - "image": "${MODULES.WebDBModule.arm64v8}" - } - }, "InferenceModule": { "version": "1.0", "type": 
"docker", @@ -146,6 +142,54 @@ } } } + }, + "lvaEdge": { + "version": "1.0", + "type": "docker", + "status": "running", + "restartPolicy": "always", + "settings": { + "image": "mcr.microsoft.com/media/live-video-analytics:1.0.4", + "createOptions": { + "HostConfig": { + "LogConfig": { + "Type": "", + "Config": { + "max-size": "10m", + "max-file": "10" + } + }, + "IpcMode": "host" + } + } + } + }, + "rtspsim": { + "version": "1.0", + "type": "docker", + "status": "running", + "restartPolicy": "always", + "settings": { + "image": "${MODULES.RtspSimModule}", + "createOptions": { + "HostConfig": { + "LogConfig": { + "Type": "", + "Config": { + "max-size": "10m", + "max-file": "10" + } + }, + "PortBindings": { + "554/tcp": [ + { + "HostPort": "554" + } + ] + } + } + } + } } } } @@ -154,7 +198,8 @@ "properties.desired": { "schemaVersion": "1.0", "routes": { - "metrics": "FROM /messages/modules/InferenceModule/outputs/metrics INTO $upstream" + "metrics": "FROM /messages/modules/InferenceModule/outputs/metrics INTO $upstream", + "InferenceToLVA": "FROM /messages/modules/InferenceModule/outputs/InferenceToLVA INTO BrokeredEndpoint(\"/modules/lvaEdge/inputs/recordingTrigger\")" }, "storeAndForwardConfiguration": { "timeToLiveSecs": 7200 @@ -166,6 +211,24 @@ }, "VisionSampleModule": { "properties.desired": {} + }, + "lvaEdge": { + "properties.desired": { + "applicationDataDirectory": "/var/media", + "azureMediaServicesArmId": "/subscriptions/$SUBSCRIPTION_ID/resourcegroups/$RESOURCE_GROUP/providers/microsoft.media/mediaservices/$SERVICE_NAME", + "aadTenantId": "$TENANT_ID", + "aadServicePrincipalAppId": "$SERVICE_PRINCIPAL_APP_ID", + "aadServicePrincipalSecret": "$SERVICE_PRINCIPAL_SECRET", + "aadEndpoint": "https://login.microsoftonline.com", + "aadResourceId": "https://management.core.windows.net/", + "armEndpoint": "https://management.azure.com/", + "diagnosticsEventsOutputName": "AmsDiagnostics", + "operationalEventsOutputName": "AmsOperational", + "logLevel": 
"Information", + "logCategories": "Application,Events", + "allowUnsecuredEndpoints": true, + "telemetryOptOut": false + } } } } diff --git a/factory-ai-vision/EdgeSolution/deployment.gpu.opencv.template.json b/factory-ai-vision/EdgeSolution/deployment.gpu.opencv.template.json index d4c4882e8..e1c069bc7 100644 --- a/factory-ai-vision/EdgeSolution/deployment.gpu.opencv.template.json +++ b/factory-ai-vision/EdgeSolution/deployment.gpu.opencv.template.json @@ -137,26 +137,6 @@ } } }, - "WebDBModule": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "env": { - "POSTGRES_PASSWORD": { - "value": "vision_on_edge" - }, - "POSTGRES_USER": { - "value": "vision_on_edge" - }, - "POSTGRES_DB": { - "value": "vision_on_edge" - } - }, - "settings": { - "image": "${MODULES.WebDBModule}" - } - }, "InferenceModule": { "version": "1.0", "type": "docker", diff --git a/factory-ai-vision/EdgeSolution/deployment.gpu.template.json b/factory-ai-vision/EdgeSolution/deployment.gpu.template.json index 0475a2c65..988eaa420 100644 --- a/factory-ai-vision/EdgeSolution/deployment.gpu.template.json +++ b/factory-ai-vision/EdgeSolution/deployment.gpu.template.json @@ -124,20 +124,6 @@ } } }, - "WebDBModule": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "env": { - "POSTGRES_PASSWORD": { "value": "vision_on_edge" }, - "POSTGRES_USER": { "value": "vision_on_edge" }, - "POSTGRES_DB": { "value": "vision_on_edge" } - }, - "settings": { - "image": "${MODULES.WebDBModule}" - } - }, "InferenceModule": { "version": "1.0", "type": "docker", @@ -216,7 +202,7 @@ "settings": { "image": "${MODULES.RtspSimModule}", "createOptions": { - "HostConfig":{ + "HostConfig": { "LogConfig": { "Type": "", "Config": { @@ -224,10 +210,12 @@ "max-file": "10" } }, - "PortBindings":{ - "554/tcp":[{ - "HostPort":"554" - }] + "PortBindings": { + "554/tcp": [ + { + "HostPort": "554" + } + ] } } } diff --git 
a/factory-ai-vision/EdgeSolution/deployment.ase.gpu.template.json b/factory-ai-vision/EdgeSolution/deployment.vpu.opencv.template.json similarity index 71% rename from factory-ai-vision/EdgeSolution/deployment.ase.gpu.template.json rename to factory-ai-vision/EdgeSolution/deployment.vpu.opencv.template.json index 951cf654d..3df15c07d 100644 --- a/factory-ai-vision/EdgeSolution/deployment.ase.gpu.template.json +++ b/factory-ai-vision/EdgeSolution/deployment.vpu.opencv.template.json @@ -22,7 +22,7 @@ "edgeAgent": { "type": "docker", "settings": { - "image": "mcr.microsoft.com/azureiotedge-agent:1.0", + "image": "mcr.microsoft.com/azureiotedge-agent:1.0.9.5", "createOptions": {} } }, @@ -66,10 +66,17 @@ "image": "${MODULES.NginxModule.amd64}", "createOptions": { "HostConfig": { + "LogConfig": { + "Type": "", + "Config": { + "max-size": "10m", + "max-file": "10" + } + }, "PortBindings": { - "8080/tcp": [ + "8181/tcp": [ { - "HostPort": "8080" + "HostPort": "8181" } ] } @@ -89,34 +96,35 @@ "ENDPOINT": { "value": "$CUSTOM_VISION_ENDPOINT" }, - "DBNAME": { "value": "vision_on_edge" }, - "DBHOST": { "value": "172.18.0.1" }, - "DBUSER": { "value": "vision_on_edge" }, - "DBPASS": { "value": "vision_on_edge" }, - "DF_INFERENECE_IS_GPU": { "value": true } - }, - "settings": { - "image": "${MODULES.WebModule}" - } - }, - "WebDBModule": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "env": { - "POSTGRES_PASSWORD": { "value": "vision_on_edge" }, - "POSTGRES_USER": { "value": "vision_on_edge" }, - "POSTGRES_DB": { "value": "vision_on_edge" } + "DBNAME": { + "value": "vision_on_edge" + }, + "DBHOST": { + "value": "172.18.0.1" + }, + "DBUSER": { + "value": "vision_on_edge" + }, + "DBPASS": { + "value": "vision_on_edge" + }, + "DF_INFERENECE_IS_GPU": { "value": false } }, "settings": { - "image": "${MODULES.WebDBModule}", + "image": "${MODULES.WebModule}", "createOptions": { "HostConfig": { + "LogConfig": { + "Type": "", + "Config": { 
+ "max-size": "10m", + "max-file": "10" + } + }, "PortBindings": { - "5432/tcp": [ + "8000/tcp": [ { - "HostPort": "5432" + "HostPort": "8000" } ] } @@ -130,36 +138,43 @@ "status": "running", "restartPolicy": "always", "env": { - "LVA_MODE": { - "value": "http" - }, "IOTHUB_CONNECTION_STRING": { "value": "$IOTHUB_CONNECTION_STRING" + }, + "LVA_MODE": { + "value": "$LVA_MODE" } }, "settings": { - "image": "${MODULES.InferenceModule.gpuamd64}", + "image": "${MODULES.InferenceModule.vpuamd64}", "createOptions": { "HostConfig": { + "LogConfig": { + "Type": "", + "Config": { + "max-size": "10m", + "max-file": "10" + } + }, "PortBindings": { "5000/tcp": [ { "HostPort": "5000" } ], - "44000/tcp": [ + "5558/tcp": [ { - "HostPort": "44000" + "HostPort": "5558" } ], - "5558/tcp": [ + "44000/tcp": [ { - "HostPort": "5558" + "HostPort": "44000" } ] }, - "IpcMode": "container:lvaEdge", - "runtime": "nvidia" + "IpcMode": "host", + "runtime": "runc" } } } @@ -180,28 +195,69 @@ "max-file": "10" } }, - "Binds": [], - "IpcMode": "shareable" + "IpcMode": "host" } } } }, - - "rtspsim": { + "CVCaptureModule": { "version": "1.0", "type": "docker", "status": "running", "restartPolicy": "always", + "env": { + "IOTHUB_CONNECTION_STRING": { + "value": "$IOTHUB_CONNECTION_STRING" + } + }, "settings": { - "image": "${MODULES.RtspSimModule}", + "image": "${MODULES.CVCaptureModule.amd64}", "createOptions": { "HostConfig": { + "LogConfig": { + "Type": "", + "Config": { + "max-size": "10m", + "max-file": "10" + } + }, "PortBindings": { - "554/tcp": [ + "9000/tcp": [ + { + "HostPort": "9000" + } + ], + "5559/tcp": [ { - "HostPort": "554" + "HostPort": "5559" } ] + }, + "runtime": "runc" + } + } + } + }, + "rtspsim": { + "version": "1.0", + "type": "docker", + "status": "running", + "restartPolicy": "always", + "settings": { + "image": "${MODULES.RtspSimModule}", + "createOptions": { + "HostConfig":{ + "LogConfig": { + "Type": "", + "Config": { + "max-size": "10m", + "max-file": "10" + } + }, + 
"PortBindings":{ + "554/tcp":[{ + "HostPort":"554" + }] } } } diff --git a/factory-ai-vision/EdgeSolution/deployment.vpu.template.json b/factory-ai-vision/EdgeSolution/deployment.vpu.template.json index 008966bc7..45eaaa8a4 100644 --- a/factory-ai-vision/EdgeSolution/deployment.vpu.template.json +++ b/factory-ai-vision/EdgeSolution/deployment.vpu.template.json @@ -132,44 +132,6 @@ } } }, - "WebDBModule": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "env": { - "POSTGRES_PASSWORD": { - "value": "vision_on_edge" - }, - "POSTGRES_USER": { - "value": "vision_on_edge" - }, - "POSTGRES_DB": { - "value": "vision_on_edge" - } - }, - "settings": { - "image": "${MODULES.WebDBModule}", - "createOptions": { - "HostConfig": { - "LogConfig": { - "Type": "", - "Config": { - "max-size": "10m", - "max-file": "10" - } - }, - "PortBindings": { - "5432/tcp": [ - { - "HostPort": "5432" - } - ] - } - } - } - } - }, "InferenceModule": { "version": "1.0", "type": "docker", diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.cpuamd64 b/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.cpuamd64 index 1c13a042f..5973a8963 100644 --- a/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.cpuamd64 +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.cpuamd64 @@ -1,7 +1,13 @@ +# ========================================================= +# Base +# ========================================================= FROM mcr.microsoft.com/azureml/onnxruntime:latest WORKDIR /app +# ========================================================= +# Install system packages +# ========================================================= RUN apt-get update && \ apt-get install -y --no-install-recommends \ libffi-dev \ @@ -23,14 +29,20 @@ RUN apt-get update \ ENV LANG en_US.UTF-8 ENV LC_ALL en_US.UTF-8 +# 
========================================================= +# Install Python package +# ========================================================= COPY requirements/base.txt ./requirements/base.txt +COPY requirements/cpu.txt ./requirements/cpu.txt ENV CONDA_ENV_NAME python38 RUN conda create --name python38 python=3.8.5 -y &&\ . activate python38 RUN [ "/bin/bash", "-c", "source activate python38 && pip install --upgrade pip"] -RUN [ "/bin/bash", "-c", "source activate python38 && pip install -r requirements/base.txt --ignore-installed"] -RUN [ "/bin/bash", "-c", "source activate python38 && pip install opencv-python onnxruntime" ] +RUN [ "/bin/bash", "-c", "source activate python38 && pip install -r requirements/cpu.txt --ignore-installed"] +# ========================================================= +# Copy Source Code +# ========================================================= COPY coco_classes.txt ./ COPY default_model default_model/ COPY default_model_6parts default_model_6parts/ @@ -68,6 +80,10 @@ COPY stream_manager.py ./ COPY streams.py ./ COPY tracker.py ./ COPY utility.py ./ + +# ========================================================= +# Run +# ========================================================= EXPOSE 5558 EXPOSE 5000 diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.cpuarm64v8 b/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.cpuarm64v8 index 5d5f273e1..28b7699b3 100644 --- a/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.cpuarm64v8 +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.cpuarm64v8 @@ -1,30 +1,124 @@ -FROM mcr.microsoft.com/azureml/onnxruntime:latest as base +# ========================================================= +# Base +# ========================================================= +FROM arm64v8/python:3.8 +WORKDIR /app +ENV LANG en_US.UTF-8 +ENV LANGUAGE=en_US.UTF-8 +ARG SYSTEM_CORES=4 + +# 
========================================================= +# Install system packages +# ========================================================= RUN apt-get update && \ apt-get install -y --no-install-recommends \ + build-essential \ + cmake \ git \ + wget \ + unzip \ + yasm \ + pkg-config \ + libtbb2 \ + libtbb-dev \ + libpq-dev \ libprotobuf-dev \ protobuf-compiler \ - libgl1-mesa-glx \ - libgtk2.0-dev \ - cmake \ + libgeos-dev \ unzip \ && rm -rf /var/lib/apt/lists/* -# libgl1-mesa-glx: opencv2 libGL.so error workaround +# protobuf-compiler: https://github.com/onnx/onnx#build-onnx-on-arm-64 +# libprotobuf-dev: https://github.com/onnx/onnx#build-onnx-on-arm-64 +# libgeos-dev: Shapely + + +# ========================================================= +# Install Python package +# ========================================================= WORKDIR /app +RUN pip install --upgrade pip +RUN pip install wheel cython protobuf COPY requirements/base.txt ./requirements/base.txt -ENV CONDA_ENV_NAME python38 -RUN conda create --name python38 python=3.8.5 -y -RUN [ "/bin/bash", "-c", "source activate python38 && pip install --upgrade pip"] -RUN [ "/bin/bash", "-c", "source activate python38 && pip install -r requirements/base.txt --ignore-installed"] -RUN [ "/bin/bash", "-c", "source activate python38 && pip install opencv-python onnxruntime"] +RUN pip install $(cat ./requirements/base.txt | grep numpy) +RUN ["pip", "install", "-r", "requirements/base.txt"] + +# ========================================================= +# Install onnxruntime +# ========================================================= +ARG ONNXRUNTIME_REPO="https://github.com/Microsoft/onnxruntime" +ARG ONNXRUNTIME_SERVER_BRANCH="rel-1.5.2" + +WORKDIR /code + +RUN git clone \ + --single-branch \ + --branch ${ONNXRUNTIME_SERVER_BRANCH} \ + --recursive ${ONNXRUNTIME_REPO} onnxruntime && \ + /bin/sh onnxruntime/dockerfiles/scripts/install_common_deps.sh && \ + cd onnxruntime && \ + /bin/sh ./build.sh \ + 
--use_openmp \ + --config Release \ + --build_wheel \ + --update \ + --build \ + --parallel \ + --cmake_extra_defines ONNXRUNTIME_VERSION=$(cat ./VERSION_NUMBER) && \ + pip install /code/onnxruntime/build/Linux/Release/dist/*.whl && \ + cd .. && \ + rm -rf onnxruntime + +RUN python -c "import onnxruntime; print(onnxruntime.__version__)" + +# ========================================================= +# Install opencv +# ========================================================= +ARG OPENCV_REPO="https://github.com/opencv/opencv.git" +ARG OPENCV_TAG="4.4.0" + +WORKDIR /code + +RUN apt-get update && \ + apt-get install -y\ + libavcodec-dev \ + libavformat-dev \ + libswscale-dev \ + libgstreamer-plugins-base1.0-dev \ + libgstreamer1.0-dev \ + libgtk-3-dev \ + libpng-dev \ + libjpeg-dev \ + libopenexr-dev \ + libtiff-dev \ + libwebp-dev \ + && rm -rf /var/lib/apt/lists/* + +RUN git clone --single-branch \ + --branch ${OPENCV_TAG} \ + --recursive ${OPENCV_REPO} \ + opencv && \ + cd opencv && \ + mkdir build && \ + cd build && \ + cmake ../ && \ + make -j${SYSTEM_CORES} && \ + make install && \ + cd /code && \ + rm -rf opencv + +RUN ["python","-c", "\"import cv2;print(cv2.__version__)\""] +# ========================================================= +# Copy Source Code +# ========================================================= COPY coco_classes.txt ./ COPY default_model default_model/ COPY default_model_6parts default_model_6parts/ COPY grpc_topology.json ./ +COPY http_topology.json ./ COPY sample_video sample_video/ COPY scenario_models scenario_models/ RUN chmod 777 sample_video/video.mp4 @@ -38,6 +132,7 @@ COPY exception_handler.py ./ COPY extension_pb2.py ./ COPY extension_pb2_grpc.py ./ COPY http_inference_engine.py ./ +COPY img.png ./ COPY inference_engine.py ./ COPY inferencing_pb2.py ./ COPY invoke.py ./ @@ -57,7 +152,10 @@ COPY streams.py ./ COPY tracker.py ./ COPY utility.py ./ +# ========================================================= +# Run +# 
========================================================= EXPOSE 5558 EXPOSE 5000 -CMD [ "/bin/bash", "-c", "source activate python38 && python3 server.py -p 44000"] +CMD [ "python3", "server.py", "-p", "44000"] diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.gpuamd64 b/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.gpuamd64 index 5cc659169..5b556a7ba 100644 --- a/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.gpuamd64 +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.gpuamd64 @@ -1,8 +1,13 @@ -#FROM mcr.microsoft.com/azureml/onnxruntime:v0.5.0-tensorrt19.06 +# ========================================================= +# Base +# ========================================================= FROM mcr.microsoft.com/azureml/onnxruntime:latest-cuda WORKDIR /app +# ========================================================= +# Install system packages +# ========================================================= RUN apt-get update && \ apt-get install -y --no-install-recommends \ libboost-filesystem-dev \ @@ -12,7 +17,9 @@ RUN apt-get update && \ libgl1-mesa-glx \ libgtk2.0-dev \ libssl-dev \ + unzip \ && rm -rf /var/lib/apt/lists/* +# libgl1-mesa-glx: opencv2 libGL.so error workaround RUN apt-get update \ && DEBIAN_FRONTEND=noninteractive apt-get install -y locales \ @@ -25,18 +32,32 @@ RUN apt-get update \ ENV LANG en_US.UTF-8 ENV LC_ALL en_US.UTF-8 +# ========================================================= +# Install Miniconda +# ========================================================= RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh RUN rm -rf /opt/miniconda RUN bash Miniconda3-latest-Linux-x86_64.sh -b -p /opt/miniconda +RUN echo ". 
/opt/miniconda/etc/profile.d/conda.sh" >> ~/.bashrc +RUN conda init bash +RUN conda create --name python38 python=3.8.5 -y +SHELL ["conda", "run", "-n", "python38", "/bin/bash", "-c"] +RUN which pip +RUN which python3 + +# ========================================================= +# Install Python package +# ========================================================= +RUN pip install --upgrade pip COPY requirements/base.txt ./requirements/base.txt -ENV CONDA_ENV_NAME python38 -RUN conda create --name python38 python=3.8.5 -y &&\ - . activate python38 -RUN [ "/bin/bash", "-c", "source activate python38 && pip install --upgrade pip"] -RUN [ "/bin/bash", "-c", "source activate python38 && pip install -r requirements/base.txt --ignore-installed"] -RUN [ "/bin/bash", "-c", "source activate python38 && pip install opencv-python onnxruntime-gpu==1.5.1" ] +RUN pip install -r requirements/base.txt +COPY requirements/gpu.txt ./requirements/gpu.txt +RUN pip install -r requirements/gpu.txt +# ========================================================= +# Copy Source Code +# ========================================================= COPY coco_classes.txt ./ COPY default_model default_model/ COPY default_model_6parts default_model_6parts/ @@ -75,8 +96,9 @@ COPY streams.py ./ COPY tracker.py ./ COPY utility.py ./ -RUN apt-get update && apt-get install unzip - +# ========================================================= +# Run +# ========================================================= EXPOSE 5558 EXPOSE 5000 diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.gpuarm64v8 b/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.gpuarm64v8 index a5983311e..f32350508 100644 --- a/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.gpuarm64v8 +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.gpuarm64v8 @@ -1,59 +1,157 @@ -FROM 
mcr.microsoft.com/azureml/onnxruntime:v.1.4.0-jetpack4.4-l4t-base-r32.4.3 as base +# ========================================================= +# Base +# ========================================================= +ARG BASE_IMAGE=nvcr.io/nvidia/l4t-base:r32.4.3 +FROM ${BASE_IMAGE} as base +WORKDIR /app +ENV LANG en_US.UTF-8 +ENV LANGUAGE=en_US.UTF-8 +ARG SYSTEM_CORES=4 + +# ========================================================= +# Install system packages +# ========================================================= RUN apt-get update && \ apt-get install -y --no-install-recommends \ + build-essential \ + cmake \ git \ + wget \ + unzip \ + yasm \ + pkg-config \ + libtbb2 \ + libtbb-dev \ + libpq-dev \ libprotobuf-dev \ protobuf-compiler \ + libgeos-dev \ + ca-certificates \ + bzip2 \ unzip \ && rm -rf /var/lib/apt/lists/* +# protobuf-compiler: https://github.com/onnx/onnx#build-onnx-on-arm-64 +# libprotobuf-dev: https://github.com/onnx/onnx#build-onnx-on-arm-64 +# libgeos-dev: Shapely +# ========================================================= +# Install Python package +# ========================================================= +WORKDIR /code/ -# Onnx builder -# -------------------------------------------------------- -FROM base as builder -RUN pip install cython protobuf +ARG MINIFORGE_VERSION=4.8.2-1 +ENV CONDA_DIR=/opt/conda +ENV LANG=C.UTF-8 LC_ALL=C.UTF-8 +ENV PATH=${CONDA_DIR}/bin:${PATH} -#RUN mkdir /code -WORKDIR /code -RUN git clone --single-branch https://github.com/onnx/onnx.git -WORKDIR /code/onnx -RUN git submodule update --init --recursive -RUN python3 setup.py install - -# Final Result -# -------------------------------------------------------- -FROM builder as final -#FROM nvidia/cuda:10.1-cudnn7-devel -WORKDIR /app +RUN wget --no-hsts \ + --quiet \ + https://github.com/conda-forge/miniforge/releases/download/${MINIFORGE_VERSION}/Miniforge3-${MINIFORGE_VERSION}-Linux-aarch64.sh \ + -O /tmp/miniforge.sh && \ + /bin/bash /tmp/miniforge.sh \ + -b \ + 
-p ${CONDA_DIR} && \ + rm /tmp/miniforge.sh && \ + conda clean -tipsy && \ + find ${CONDA_DIR} -follow -type f -name '*.a' -delete && \ + find ${CONDA_DIR} -follow -type f -name '*.pyc' -delete && \ + conda clean -afy -#Upgradin CUDA to 10.2 -#RUN sudo apt-get install -y software-properties-common -#RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/cuda-ubuntu1604.pin -#RUN sudo mv cuda-ubuntu1604.pin /etc/apt/preferences.d/cuda-repository-pin-600 -#RUN sudo apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/7fa2af80.pub -#RUN sudo add-apt-repository "deb http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/ /" -#RUN sudo apt-get update -#RUN sudo apt-get -y install cuda +RUN conda create --name python38 python=3.8 +RUN echo ". ${CONDA_DIR}/etc/profile.d/conda.sh && conda activate python38" >> /etc/skel/.bashrc +RUN echo ". ${CONDA_DIR}/etc/profile.d/conda.sh && conda activate python38" >> ~/.bashrc +SHELL ["conda", "run", "-n", "python38", "/bin/bash", "-c"] +# ========================================================= +# Install Python package +# ========================================================= +WORKDIR /app + +RUN pip install --upgrade pip +RUN pip install wheel cython protobuf COPY requirements/base.txt ./requirements/base.txt -ENV CONDA_ENV_NAME python38 -RUN conda create --name python38 python=3.8.5 -y && \ - . 
activate python38 -RUN [ "/bin/bash", "-c", "source activate python38 && pip install --upgrade pip"] -RUN [ "/bin/bash", "-c", "source activate python38 && pip install -r requirements/base.txt --ignore-installed"] -RUN [ "/bin/bash", "-c", "source activate python38 && pip install opencv-python onnxruntime" ] -RUN apt-get install -y python3-numpy python3-zmq -RUN pip install -U certifi --ignore-installed - -COPY requirements-gpuarm.txt ./ -RUN pip install -r requirements-gpuarm.txt -#RUN pip install onnxruntime-gpu +# Workaround scikit image requires numpy while dependency is not resolved +RUN pip install $(cat ./requirements/base.txt | grep numpy) +RUN pip install -r requirements/base.txt + +# ========================================================= +# Install onnxruntime +# ========================================================= +ARG ONNXRUNTIME_REPO="https://github.com/Microsoft/onnxruntime" +ARG ONNXRUNTIME_SERVER_BRANCH="rel-1.5.2" + +WORKDIR /code + +RUN git clone \ + --single-branch \ + --branch ${ONNXRUNTIME_SERVER_BRANCH} \ + --recursive ${ONNXRUNTIME_REPO} onnxruntime && \ + /bin/sh onnxruntime/dockerfiles/scripts/install_common_deps.sh && \ + cd onnxruntime && \ + /bin/sh ./build.sh \ + --use_openmp \ + --config Release \ + --build_wheel \ + --update \ + --build \ + --parallel \ + --use_cuda \ + --cuda_home /usr/local/cuda \ + --cudnn_home /usr/lib/aarch64-linux-gnu \ + --cmake_extra_defines ONNXRUNTIME_VERSION=$(cat ./VERSION_NUMBER) && \ + pip install /code/onnxruntime/build/Linux/Release/dist/*.whl && \ + cd .. 
&& \ + rm -rf onnxruntime + +RUN python -c "import onnxruntime; print(onnxruntime.__version__)" + +# ========================================================= +# Install opencv +# ========================================================= +ARG OPENCV_REPO="https://github.com/opencv/opencv.git" +ARG OPENCV_TAG="4.4.0" + +WORKDIR /code + +RUN apt-get update && \ + apt-get install -y\ + libavcodec-dev \ + libavformat-dev \ + libswscale-dev \ + libgstreamer-plugins-base1.0-dev \ + libgstreamer1.0-dev \ + libgtk-3-dev \ + libpng-dev \ + libjpeg-dev \ + libopenexr-dev \ + libtiff-dev \ + libwebp-dev \ + && rm -rf /var/lib/apt/lists/* + +RUN git clone --single-branch \ + --branch ${OPENCV_TAG} \ + --recursive ${OPENCV_REPO} \ + opencv && \ + cd opencv && \ + mkdir build && \ + cd build && \ + cmake ../ && \ + make -j${SYSTEM_CORES} && \ + make install && \ + cd /code && \ + rm -rf opencv + +RUN ["python","-c", "\"import cv2;print(cv2.__version__)\""] +# ========================================================= +# Copy Source Code +# ========================================================= COPY coco_classes.txt ./ COPY default_model default_model/ COPY default_model_6parts default_model_6parts/ COPY grpc_topology.json ./ +COPY http_topology.json ./ COPY sample_video sample_video/ COPY scenario_models scenario_models/ RUN chmod 777 sample_video/video.mp4 @@ -67,6 +165,7 @@ COPY exception_handler.py ./ COPY extension_pb2.py ./ COPY extension_pb2_grpc.py ./ COPY http_inference_engine.py ./ +COPY img.png ./ COPY inference_engine.py ./ COPY inferencing_pb2.py ./ COPY invoke.py ./ @@ -86,7 +185,10 @@ COPY streams.py ./ COPY tracker.py ./ COPY utility.py ./ +# ========================================================= +# Run +# ========================================================= EXPOSE 5558 EXPOSE 5000 -CMD [ "/bin/bash", "-c", "source activate python38 && python3 server.py -p 44000"] +CMD [ "python3", "server.py", "-p", "44000"] diff --git 
a/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.gpuarm64v8-ubuntu b/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.gpuarm64v8-ubuntu deleted file mode 100644 index da8aabaab..000000000 --- a/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.gpuarm64v8-ubuntu +++ /dev/null @@ -1,100 +0,0 @@ -# Still testing -# Base -# -------------------------------------------------------- -FROM arm64v8/ubuntu as base - -RUN apt-get update \ - && apt-get install -y locales \ - && sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen \ - && dpkg-reconfigure --frontend=noninteractive locales \ - && update-locale LANG=en_US.UTF-8 \ - && rm -rf /var/lib/apt/lists/* - -ENV LANG en_US.UTF-8 -ENV LC_ALL en_US.UTF-8 -ENV DEBIAN_FRONTEND noninteractive -# -RUN apt-get install -y python3 -RUN apt-get -y install python3-pip -RUN apt-get install -y libgl1-mesa-glx -RUN apt-get install -y python3-cryptography -RUN apt-get install -y python3-zmq -RUN apt-get install -y python3-opencv -RUN apt-get install -y python3-ruamel.yaml - -# Onnx Runtime Builder -# -------------------------------------------------------- -From base as onnxruntime-builder -ARG ONNXRUNTIME_REPO=https://github.com/Microsoft/onnxruntime -ARG ONNXRUNTIME_BRANCH=master - -RUN mkdir /code -WORKDIR /code -ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:/code/cmake-3.14.3-Linux-x86_64/bin:/opt/miniconda/bin:${PATH} -ENV LD_LIBRARY_PATH /opt/miniconda/lib:$LD_LIBRARY_PATH - -# Prepare onnxruntime repository & build onnxruntime with TensorRT -RUN apt-get install -y git -RUN git clone --single-branch --branch ${ONNXRUNTIME_BRANCH} --recursive ${ONNXRUNTIME_REPO} onnxruntime -RUN /bin/sh onnxruntime/dockerfiles/scripts/install_common_deps.sh -RUN cp onnxruntime/docs/Privacy.md /code/Privacy.md -RUN cp onnxruntime/dockerfiles/LICENSE-IMAGE.txt /code/LICENSE-IMAGE.txt -RUN cp onnxruntime/ThirdPartyNotices.txt 
/code/ThirdPartyNotices.txt -RUN cd onnxruntime &&\ - /bin/sh ./build.sh --cuda_home /usr/local/cuda --cudnn_home /usr/lib/x86_64-linux-gnu/ --use_tensorrt --tensorrt_home /workspace/tensorrt --config Release --build_wheel --update --build --cmake_extra_defines ONNXRUNTIME_VERSION=$(cat ./VERSION_NUMBER) -RUN pip3 install /code/onnxruntime/build/Linux/Release/dist/*.whl -RUN cd .. &&\ - rm -rf onnxruntime cmake-3.14.3-Linux-x86_64 - -# Final result -# -------------------------------------------------------- -FROM onnxruntime-builder as final - -RUN mkdir /app -WORKDIR /app - -RUN apt-get install -y numpy -COPY requirements.txt ./ -RUN pip3 install --upgrade pip -RUN pip3 install -U certifi --ignore-installed -RUN pip3 install -r requirements.txt --ignore-installed && \ - pip3 install flask - -COPY coco_classes.txt ./ -COPY default_model default_model/ -COPY default_model_6parts default_model_6parts/ -COPY grpc_topology.json ./ -COPY sample_video sample_video/ -COPY scenario_models scenario_models/ -RUN chmod 777 sample_video/video.mp4 -RUN chmod 777 default_model - -COPY arguments.py ./ -COPY config.py ./ -COPY exception_handler.py ./ -COPY extension_pb2.py ./ -COPY extension_pb2_grpc.py ./ -COPY http_inference_engine.py ./ -COPY inference_engine.py ./ -COPY inferencing_pb2.py ./ -COPY invoke.py ./ -COPY logging_conf/logging_config.py ./logging_conf/logging_config.py -COPY main.py ./ -COPY media_pb2.py ./ -COPY model_wrapper.py ./ -COPY object_detection.py ./ -COPY object_detection2.py ./ -COPY onnxruntime_predict.py ./ -COPY scenarios.py ./ -COPY server.py ./ -COPY shared_memory.py ./ -COPY sort.py ./ -COPY stream_manager.py ./ -COPY streams.py ./ -COPY tracker.py ./ -COPY utility.py ./ - -EXPOSE 5558 -EXPOSE 5000 - -CMD [ "python3 server.py -p 44000" ] diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/main.py b/factory-ai-vision/EdgeSolution/modules/InferenceModule/main.py index 6030e6856..d9c370f5b 100644 --- 
a/factory-ai-vision/EdgeSolution/modules/InferenceModule/main.py +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/main.py @@ -53,9 +53,9 @@ SCENARIO3_VIDEO = "../RtspSimModule/videos/scenario3-defect-detection.mkv" DEFAULT_MODEL = "default_model" -SCENARIO1_MODEL = "scenario_models/1" -SCENARIO2_MODEL = "scenario_models/2" -SCENARIO3_MODEL = "scenario_models/3" +SCENARIO1_MODEL = "scenario_models/onnx/1" +SCENARIO2_MODEL = "scenario_models/onnx/2" +SCENARIO3_MODEL = "scenario_models/onnx/3" DOWNLOADED_MODEL = "model" ### CONFIGURATION ### @@ -92,7 +92,7 @@ def web_module_url(): if is_edge(): - return "172.18.0.1:8080" + return "172.18.0.1:8181" else: return "localhost:8000" diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/model_wrapper.py b/factory-ai-vision/EdgeSolution/modules/InferenceModule/model_wrapper.py index d0bd0992f..f1dddfc0d 100644 --- a/factory-ai-vision/EdgeSolution/modules/InferenceModule/model_wrapper.py +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/model_wrapper.py @@ -36,9 +36,10 @@ class ONNXRuntimeModelDeploy(ObjectDetection): def __init__(self, cam_type="video_file", model_dir="./default_model"): self.lock = threading.Lock() - self.model = self.load_model( - model_dir, is_default_model=True, is_scenario_model=False - ) + #self.model = self.load_model( + # model_dir, is_default_model=True, is_scenario_model=False + #) + self.model = None self.model_uri = None self.model_downloading = False self.lva_mode = LVA_MODE @@ -62,6 +63,16 @@ def __init__(self, cam_type="video_file", model_dir="./default_model"): self.max_total_frame_rate = CPU_MAX_FRAME_RATE self.update_frame_rate_by_number_of_streams(1) + @property + def is_vpu(self): + return self.get_device() == 'vpu' + + def get_device(self): + device = onnxruntime.get_device() + if device == 'CPU-OPENVINO_MYRIAD': + device = 'vpu' + return device.lower() + def set_is_scenario(self, is_scenario): self.is_scenario = is_scenario @@ -175,6 +186,13 @@ def 
run(self, model_uri, MODEL_DIR): def update_model(self, model_dir): is_default_model = "default_model" in model_dir is_scenario_model = "scenario_models" in model_dir + + if is_scenario_model: + if self.is_vpu: + model_dir += '/onnxfloat16' + else: + model_dir += '/onnx' + model = self.load_model(model_dir, is_default_model, is_scenario_model) # Protected by Mutex diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/module.json b/factory-ai-vision/EdgeSolution/modules/InferenceModule/module.json index 8cc6eb4aa..b8ec4f44b 100644 --- a/factory-ai-vision/EdgeSolution/modules/InferenceModule/module.json +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/module.json @@ -4,7 +4,7 @@ "image": { "repository": "${CONTAINER_REGISTRY_NAME}/intelligentedge/inferencemodule", "tag": { - "version": "0.1.549", + "version": "0.1.587", "platforms": { "cpuamd64": "./dockerfiles/Dockerfile.cpuamd64", "gpuamd64": "./dockerfiles/Dockerfile.gpuamd64", @@ -16,8 +16,7 @@ "arm32v7": "./dockerfiles/Dockerfile.arm32v7", "arm32v7.debug": "./dockerfiles/Dockerfile.arm32v7.debug", "arm64v8": "./dockerfiles/Dockerfile.arm64v8", - "arm64v8.debug": "./dockerfiles/Dockerfile.arm64v8.debug", - "fake": "./dockerfiles/Dockerfile.fake" + "arm64v8.debug": "./dockerfiles/Dockerfile.arm64v8.debug" } }, "buildOptions": [], diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/requirements/base.txt b/factory-ai-vision/EdgeSolution/modules/InferenceModule/requirements/base.txt index 669a91ab0..0ffb66875 100644 --- a/factory-ai-vision/EdgeSolution/modules/InferenceModule/requirements/base.txt +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/requirements/base.txt @@ -1,31 +1,31 @@ +# arm/amd/x86 installable package +FastAPI +Jinja2==2.11.2 +MarkupSafe==1.1.1 +Pillow +Werkzeug==1.0.1 azure-iot-device==2.0.1 azure-iot-hub==2.2.2rc0 +certifi # certifi should always be latest chardet==3.0.4 click==7.1.2 -FastAPI +filterpy +grpcio idna==2.9 itsdangerous==1.1.0 
janus==0.5.0 -Jinja2==2.11.2 -MarkupSafe==1.1.1 numpy==1.18.4 +onnx==1.7.0 paho-mqtt==1.5.0 protobuf==3.12.4 -requests==2.23.0 +pyzmq requests-unixsocket==0.2.0 +requests==2.23.0 +scikit-image +shapely six==1.14.0 transitions==0.8.1 typing-extensions==3.7.4.2 urllib3==1.25.9 -Werkzeug==1.0.1 -Pillow -pyzmq -opencv-python -onnx==1.7.0 -shapely -grpcio utility -filterpy -scikit-image uvicorn -certifi # certifi should always be latest diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/requirements/cpu.txt b/factory-ai-vision/EdgeSolution/modules/InferenceModule/requirements/cpu.txt new file mode 100644 index 000000000..de62b0fd7 --- /dev/null +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/requirements/cpu.txt @@ -0,0 +1,4 @@ +-r ./base.txt +# cpu arm not installable package +opencv-python==4.4.0.44 +onnxruntime==1.5.2 diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/requirements/gpu.txt b/factory-ai-vision/EdgeSolution/modules/InferenceModule/requirements/gpu.txt new file mode 100644 index 000000000..53c661ff3 --- /dev/null +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/requirements/gpu.txt @@ -0,0 +1,3 @@ +-r ./base.txt +opencv-python==4.4.0.44 +onnxruntime==1.5.2 diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/requirements/local.txt b/factory-ai-vision/EdgeSolution/modules/InferenceModule/requirements/local.txt index dd5be0a11..a07964211 100644 --- a/factory-ai-vision/EdgeSolution/modules/InferenceModule/requirements/local.txt +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/requirements/local.txt @@ -1,4 +1,4 @@ --r ./base.txt +-r ./cpu.txt # Testing # --------------------------------------------------------- pytest==5.4.3 diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/cvexport.manifest b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnx/cvexport.manifest similarity index 100% rename from 
factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/cvexport.manifest rename to factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnx/cvexport.manifest diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/labels.txt b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnx/labels.txt similarity index 100% rename from factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/labels.txt rename to factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnx/labels.txt diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/model.onnx b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnx/model.onnx similarity index 100% rename from factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/model.onnx rename to factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnx/model.onnx diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnxfloat16/cvexport.manifest b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnxfloat16/cvexport.manifest new file mode 100644 index 000000000..82b06d9cc --- /dev/null +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnxfloat16/cvexport.manifest @@ -0,0 +1,13 @@ +{ + "DomainType": "ObjectDetection", + "Platform": "ONNX", + "Flavor": "ONNXFloat16", + "ExporterVersion": "2.0", + "ExportedDate": "2020-10-26T18:52:34.248884Z", + "IterationId": "f45f52e5-ef0f-4aa4-8505-969ac41b8753", + "ModelFileName": "model.onnx", + "LabelFileName": "labels.txt", + "MetadataPropsFileName": null, + "ModelFileSHA1": "c3190318e7af89cfa3c3f1e4c534ee2def01677c", + "SchemaVersion": "1.0" +} \ No newline at end of file diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnxfloat16/labels.txt 
b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnxfloat16/labels.txt new file mode 100644 index 000000000..66221b440 --- /dev/null +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnxfloat16/labels.txt @@ -0,0 +1 @@ +Box \ No newline at end of file diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnxfloat16/model.onnx b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnxfloat16/model.onnx new file mode 100644 index 000000000..81aa48cb7 Binary files /dev/null and b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/1/onnxfloat16/model.onnx differ diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/cvexport.manifest b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnx/cvexport.manifest similarity index 100% rename from factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/cvexport.manifest rename to factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnx/cvexport.manifest diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/labels.txt b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnx/labels.txt similarity index 100% rename from factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/labels.txt rename to factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnx/labels.txt diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/model.onnx b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnx/model.onnx similarity index 100% rename from factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/model.onnx rename to factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnx/model.onnx diff --git 
a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnxfloat16/cvexport.manifest b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnxfloat16/cvexport.manifest new file mode 100644 index 000000000..e339cdc03 --- /dev/null +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnxfloat16/cvexport.manifest @@ -0,0 +1,13 @@ +{ + "DomainType": "ObjectDetection", + "Platform": "ONNX", + "Flavor": "ONNXFloat16", + "ExporterVersion": "2.0", + "ExportedDate": "2020-10-27T10:35:26.5408512Z", + "IterationId": "ed2f88e0-6aff-4f5f-9c9e-225e81727782", + "ModelFileName": "model.onnx", + "LabelFileName": "labels.txt", + "MetadataPropsFileName": null, + "ModelFileSHA1": "9aec019aab5f36dcc133bb3bddc7538fe86985c7", + "SchemaVersion": "1.0" +} \ No newline at end of file diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnxfloat16/labels.txt b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnxfloat16/labels.txt new file mode 100644 index 000000000..8c10d7137 --- /dev/null +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnxfloat16/labels.txt @@ -0,0 +1 @@ +Person \ No newline at end of file diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnxfloat16/model.onnx b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnxfloat16/model.onnx new file mode 100644 index 000000000..9be184e1f Binary files /dev/null and b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/2/onnxfloat16/model.onnx differ diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/cvexport.manifest b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnx/cvexport.manifest similarity index 100% rename from factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/cvexport.manifest rename to 
factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnx/cvexport.manifest diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/labels.txt b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnx/labels.txt similarity index 100% rename from factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/labels.txt rename to factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnx/labels.txt diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/model.onnx b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnx/model.onnx similarity index 100% rename from factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/model.onnx rename to factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnx/model.onnx diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnxfloat16/cvexport.manifest b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnxfloat16/cvexport.manifest new file mode 100644 index 000000000..05f19902c --- /dev/null +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnxfloat16/cvexport.manifest @@ -0,0 +1,13 @@ +{ + "DomainType": "ObjectDetection", + "Platform": "ONNX", + "Flavor": "ONNXFloat16", + "ExporterVersion": "2.0", + "ExportedDate": "2020-10-27T10:31:06.3293293Z", + "IterationId": "e938e04e-7e79-440d-9549-2b5b2898877c", + "ModelFileName": "model.onnx", + "LabelFileName": "labels.txt", + "MetadataPropsFileName": null, + "ModelFileSHA1": "832245f033e5ee19508ca7c3b42b6fb0a541fbf2", + "SchemaVersion": "1.0" +} \ No newline at end of file diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnxfloat16/labels.txt b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnxfloat16/labels.txt new file mode 100644 index 
000000000..26ec432bb --- /dev/null +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnxfloat16/labels.txt @@ -0,0 +1,2 @@ +Bottle - NG +Bottle - OK \ No newline at end of file diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnxfloat16/model.onnx b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnxfloat16/model.onnx new file mode 100644 index 000000000..148212d4b Binary files /dev/null and b/factory-ai-vision/EdgeSolution/modules/InferenceModule/scenario_models/3/onnxfloat16/model.onnx differ diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/server.py b/factory-ai-vision/EdgeSolution/modules/InferenceModule/server.py index e72c49506..9a64d7afe 100644 --- a/factory-ai-vision/EdgeSolution/modules/InferenceModule/server.py +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/server.py @@ -19,6 +19,7 @@ import zmq from fastapi import BackgroundTasks, FastAPI, Request from fastapi.responses import StreamingResponse +import onnxruntime import extension_pb2_grpc from api.models import ( @@ -57,6 +58,8 @@ LVA_MODE = os.environ.get("LVA_MODE", "grpc") IS_OPENCV = os.environ.get("IS_OPENCV", "false") +NO_DISPLAY = os.environ.get("NO_DISPLAY", "false") + # Main thread onnx = ONNXRuntimeModelDeploy() @@ -115,6 +118,7 @@ def metrics(cam_id: str): last_prediction_count = {} is_gpu = onnx.is_gpu scenario_metrics = [] + device = onnx.get_device() stream = stream_manager.get_stream_by_id_danger(cam_id) if stream: @@ -134,6 +138,7 @@ def metrics(cam_id: str): "inference_num": inference_num, "unidentified_num": unidentified_num, "is_gpu": is_gpu, + 'device': device, "average_inference_time": average_inference_time, "last_prediction_count": last_prediction_count, "scenario_metrics": scenario_metrics, @@ -233,7 +238,7 @@ def update_cams(request_body: CamerasModel): # frame_rate = onnx.update_frame_rate_by_number_of_streams(n) # recommended_fps = 
onnx.get_recommended_frame_rate(n) onnx.set_frame_rate(frame_rate) - logger.warning('update frame rate to {0}'.format(frame_rate)) + logger.warning("update frame rate to {}".format(frame_rate)) # lva_mode @@ -382,7 +387,8 @@ def get_recommended_fps(number_of_cameras: int): Args: number_of_cameras (int): number_of_cameras """ - return {'fps': int(onnx.get_recommended_frame_rate(number_of_cameras))} + return {"fps": int(onnx.get_recommended_frame_rate(number_of_cameras))} + @app.get("/get_recommended_total_fps") def get_recommended_total_fps(): @@ -391,7 +397,11 @@ def get_recommended_total_fps(): Args: number_of_cameras (int): number_of_cameras """ - return {'fps': int(onnx.get_recommended_total_frame_rate())} + return {"fps": int(onnx.get_recommended_total_frame_rate())} + +@app.get("/recommended_fps") +def recommended_fps(): + return {"fps": int(onnx.get_recommended_total_frame_rate())} # @app.route("/get_current_fps") @@ -429,6 +439,7 @@ def keep_alive(self, cam_id): @app.get("/video_feed") async def video_feed(cam_id: str): + if NO_DISPLAY == "true" : return 'ok' stream = stream_manager.get_stream_by_id(cam_id) if stream: print("[INFO] Preparing Video Feed for stream %s" % cam_id, flush=True) @@ -452,6 +463,12 @@ async def keep_alive(cam_id: str): return "failed" +@app.get('/get_device') +def get_device(): + device = onnx.get_device() + return {'device': device} + + def init_topology(): """init_topology. 
@@ -459,8 +476,9 @@ def init_topology(): """ instances = gm.invoke_graph_instance_list() + logger.info("instances %s", instances) if instances["status"] != 200: - logger.warning("Failed to invoker direct method: %s", instances["payload"]) + logger.warning("Failed to invoke direct method: %s", instances["payload"]) return -1 logger.info( "========== Deleting %s instance(s) ==========", @@ -506,13 +524,13 @@ def benchmark(): SCENARIO1_MODEL = "scenario_models/1" n_threads = 3 - n_images = 30 + n_images = 15 logger.info("============= BenchMarking (Begin) ==================") logger.info("--- Settings ----") logger.info("%s threads", n_threads) logger.info("%s images", n_images) - stream_ids = list(str(i) for i in range(n_threads)) + stream_ids = list(str(i+10000) for i in range(n_threads)) stream_manager.update_streams(stream_ids) onnx.set_is_scenario(True) onnx.update_model(SCENARIO1_MODEL) @@ -543,13 +561,17 @@ def _f(): # print(t1-t0) discount = 0.75 - max_total_frame_rate = discount * (n_images * n_threads) / (t1-t0) + max_total_frame_rate = discount * (n_images * n_threads) / (t1 - t0) logger.info("---- Overall ----") logger.info("Processing %s images in %s seconds", n_images * n_threads, t1 - t0) logger.info(" Avg: %s ms per image", (t1 - t0) / (n_images * n_threads) * 1000) logger.info(" Recommended Total FPS: %s", max_total_frame_rate) logger.info("============= BenchMarking (End) ==================") + + stream_manager.update_streams([]) + + max_total_frame_rate = max(1, max_total_frame_rate) onnx.set_max_total_frame_rate(max_total_frame_rate) diff --git a/factory-ai-vision/EdgeSolution/modules/NginxModule/conf.d/default.conf b/factory-ai-vision/EdgeSolution/modules/NginxModule/conf.d/default.conf index 54bd61733..9efd6b9a0 100644 --- a/factory-ai-vision/EdgeSolution/modules/NginxModule/conf.d/default.conf +++ b/factory-ai-vision/EdgeSolution/modules/NginxModule/conf.d/default.conf @@ -53,7 +53,7 @@ server { access_log off; error_log /var/log/nginx/error.log 
warn; - # redirect all HTTP traffic to localhost:8080 + # redirect all HTTP traffic to localhost:8181 proxy_pass http://webmodule; proxy_set_header X-Real-IP $remote_addr; proxy_set_header Host $http_host; diff --git a/factory-ai-vision/EdgeSolution/modules/NginxModule/dockerfiles/Dockerfile.arm32v7 b/factory-ai-vision/EdgeSolution/modules/NginxModule/dockerfiles/Dockerfile.arm32v7 index 462f2237b..33eda1729 100644 --- a/factory-ai-vision/EdgeSolution/modules/NginxModule/dockerfiles/Dockerfile.arm32v7 +++ b/factory-ai-vision/EdgeSolution/modules/NginxModule/dockerfiles/Dockerfile.arm32v7 @@ -1,8 +1,8 @@ -FROM arm32v7:nginx - -# RUN rm /etc/nginx/conf.d/* -COPY ./conf.d/default.conf /etc/nginx/conf.d/default.conf -# COPY ./ssl /usr/share/nginx/ssl/ - -EXPOSE 8000 -EXPOSE 8080 \ No newline at end of file +FROM arm32v7/nginx + +# RUN rm /etc/nginx/conf.d/* +COPY ./conf.d/default.conf /etc/nginx/conf.d/default.conf +# COPY ./ssl /usr/share/nginx/ssl/ + +EXPOSE 8000 +EXPOSE 8181 diff --git a/factory-ai-vision/EdgeSolution/modules/NginxModule/dockerfiles/Dockerfile.arm64v8 b/factory-ai-vision/EdgeSolution/modules/NginxModule/dockerfiles/Dockerfile.arm64v8 index cfe569153..b7a554bb4 100644 --- a/factory-ai-vision/EdgeSolution/modules/NginxModule/dockerfiles/Dockerfile.arm64v8 +++ b/factory-ai-vision/EdgeSolution/modules/NginxModule/dockerfiles/Dockerfile.arm64v8 @@ -1,8 +1,8 @@ -FROM arm64v8:nginx +FROM arm64v8/nginx # RUN rm /etc/nginx/conf.d/* COPY ./conf.d/default.conf /etc/nginx/conf.d/default.conf # COPY ./ssl /usr/share/nginx/ssl/ EXPOSE 8000 -EXPOSE 8080 \ No newline at end of file +EXPOSE 8181 diff --git a/factory-ai-vision/EdgeSolution/modules/NginxModule/module.json b/factory-ai-vision/EdgeSolution/modules/NginxModule/module.json index 77b52faaa..14c14222c 100644 --- a/factory-ai-vision/EdgeSolution/modules/NginxModule/module.json +++ b/factory-ai-vision/EdgeSolution/modules/NginxModule/module.json @@ -4,7 +4,7 @@ "image": { "repository": 
"${CONTAINER_REGISTRY_NAME}/intelligentedge/nginxmodule", "tag": { - "version": "0.0.9", + "version": "0.0.10", "platforms": { "amd64": "./dockerfiles/Dockerfile.amd64", "arm32v7": "./dockerfiles/Dockerfile.arm32v7", diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/Makefile b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/Makefile index df740eda4..036232c00 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/Makefile +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/Makefile @@ -31,6 +31,13 @@ test: ## unit test @pytest @echo "Testing passed\n" +.PHONY: graph +graph: ## unit test + @echo "Making graph..." + @echo "You may need to install graphviz" + @$(PYTHON) manage.py graph_models -a -o myapp_models.png + @echo "Graph: myapp_models.png" + .PHONY: coverage-html coverage-html: ## Dev @echo "Running coverage..." diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/configs/settings/local.py b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/configs/settings/local.py index 51ef8fe74..690ed1673 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/configs/settings/local.py +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/configs/settings/local.py @@ -14,4 +14,6 @@ # SECURITY WARNING: don't run with debug turned on in production! 
DEBUG = True +INSTALLED_APPS.append("django_extensions") + LOGGING = logging_config.LOGGING_CONFIG_DEV diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/requirements/base.txt b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/requirements/base.txt index b62abf3a2..e400a7c26 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/requirements/base.txt +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/requirements/base.txt @@ -17,3 +17,4 @@ opencensus-ext-django==0.7.2 opencensus-ext-stackdriver==0.7.2 channels==2.4.0 django-extra-fields==3.0.0 +django-extensions==3.0.9 diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/requirements/local.txt b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/requirements/local.txt index fa9b2e6c8..1354ab31a 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/requirements/local.txt +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/requirements/local.txt @@ -20,6 +20,7 @@ pre-commit==2.7.1 factory-boy==2.12.0 django-coverage-plugin==1.8.0 pytest-django==3.9.0 +django-extensions # IPython # --------------------------------------------------------- diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_part_detections/api/views.py b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_part_detections/api/views.py index 6b3367f48..62eb77c51 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_part_detections/api/views.py +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_part_detections/api/views.py @@ -186,8 +186,14 @@ def configure(self, request, pk=None) -> Response: ) instance.has_configured = True instance.save() + if instance.inference_module.is_vpu(): + export_flavor = "ONNXFloat16" + else: + export_flavor = None - TRAINING_MANAGER.add(project_id=instance.project.id) + TRAINING_MANAGER.add( 
+ project_id=instance.project.id, export_flavor=export_flavor + ) if_trained_then_deploy_helper(part_detection_id=instance.id) return Response({"status": "ok"}) diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_projects/models.py b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_projects/models.py index 00cccd0d9..8baba9edd 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_projects/models.py +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_projects/models.py @@ -295,7 +295,7 @@ def train_project(self): is_task_success = True return is_task_success - def export_iterationv3_2(self, iteration_id): + def export_iterationv3_2(self, iteration_id, export_flavor: str = ""): """export_iterationv3_2. CustomVisionTrainingClient SDK may have some issues exporting. @@ -313,6 +313,8 @@ def export_iterationv3_2(self, iteration_id): + iteration_id + "/export?platform=ONNX" ) + if export_flavor: + url.append(f"&flavor={export_flavor}") res = requests.post( url, "{body}", headers={"Training-key": setting_obj.training_key} ) diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_projects/utils.py b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_projects/utils.py index 819f8f5de..18fc0709c 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_projects/utils.py +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_projects/utils.py @@ -237,7 +237,7 @@ def pull_cv_project_helper(project_id, customvision_project_id: str, is_partial: logger.info("Pulling Custom Vision Project... End") -def train_project_worker(project_id): +def train_project_worker(project_id, export_flavor: str = ""): """train_project_worker. 
Args: @@ -425,7 +425,10 @@ def train_project_worker(project_id): status_init = False while True: time.sleep(1) - exports = trainer.get_exports(customvision_id, iteration.id) + if export_flavor: + exports = trainer.get_exports(customvision_id, iteration.id) + else: + exports = trainer.get_exports(customvision_id, iteration.id) if not status_init: upcreate_training_status( project_id=project_obj.id, @@ -435,7 +438,9 @@ def train_project_worker(project_id): status_init = True if len(exports) == 0 or not exports[0].download_uri: - res = project_obj.export_iterationv3_2(iteration.id) + res = project_obj.export_iterationv3_2( + iteration.id, export_flavor=export_flavor + ) logger.info("Export response from Custom Vision: %s", res.json()) continue break @@ -475,7 +480,7 @@ def train_project_worker(project_id): project_obj.save() -def train_project_catcher(project_id): +def train_project_catcher(project_id, export_flavor): """train_project_catcher. Dummy exception handler. @@ -503,7 +508,7 @@ def __init__(self): self.mutex = threading.Lock() self.garbage_collector() - def add(self, project_id): + def add(self, project_id, export_flavor: str = ""): """add. Add a project in training tasks. @@ -511,7 +516,7 @@ def add(self, project_id): if project_id in self.training_tasks: raise ProjectAlreadyTraining self.mutex.acquire() - task = TrainingTask(project_id=project_id) + task = TrainingTask(project_id=project_id, export_flavor=export_flavor) self.training_tasks[project_id] = task task.start() self.mutex.release() @@ -555,7 +560,7 @@ def _gc(self): class TrainingTask: """TrainingTask.""" - def __init__(self, project_id): + def __init__(self, project_id, export_flavor): """__init__. 
Args: @@ -564,6 +569,7 @@ def __init__(self, project_id): self.project_id = project_id self.status = "init" self.worker = None + self.export_flavor = export_flavor def start(self): """start.""" @@ -571,7 +577,7 @@ def start(self): self.worker = threading.Thread( target=train_project_catcher, name=f"train_project_worker_{self.project_id}", - kwargs={"project_id": self.project_id}, + kwargs={"project_id": self.project_id, "export_flavor": self.export_flavor}, daemon=True, ) self.worker.start() diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/inference_modules/models.py b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/inference_modules/models.py index 3e0556e7d..d043bb311 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/inference_modules/models.py +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/inference_modules/models.py @@ -27,5 +27,13 @@ def recommended_fps(self) -> int: result = 10 return result + def is_vpu(self) -> bool: + try: + response = requests.get("http://" + self.url + "/get_device") + result = response.json()["device"] + return result == "vpu" + except Exception: + return False + def __str__(self): return self.name diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/module.json b/factory-ai-vision/EdgeSolution/modules/WebModule/module.json index c030acda0..67a32d8b2 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/module.json +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/module.json @@ -4,7 +4,7 @@ "image": { "repository": "${CONTAINER_REGISTRY_NAME}/intelligentedge/webmodule", "tag": { - "version": "0.3.533", + "version": "0.3.539", "platforms": { "amd64": "./dockerfiles/Dockerfile.amd64", "amd64.backend_only": "./dockerfiles/Dockerfile.amd64.backend_only", diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/LiveViewContainer/LiveViewContainer.tsx 
b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/LiveViewContainer/LiveViewContainer.tsx index 6ed1b79d0..51474e9a0 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/LiveViewContainer/LiveViewContainer.tsx +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/LiveViewContainer/LiveViewContainer.tsx @@ -20,7 +20,7 @@ export const LiveViewContainer: React.FC<{ showVideo: boolean; cameraId: number; }> = ({ showVideo, cameraId }) => { - const showAOI = useSelector((state) => selectCameraById(state, cameraId)?.useAOI); + const showAOI = useSelector((state) => Boolean(selectCameraById(state, cameraId)?.useAOI)); const showCountingLine = useSelector( (state) => selectCameraById(state, cameraId)?.useCountingLine && diff --git a/factory-ai-vision/Installer/acs.zip b/factory-ai-vision/Installer/acs.zip index 1ad3f709b..5f6288009 100644 Binary files a/factory-ai-vision/Installer/acs.zip and b/factory-ai-vision/Installer/acs.zip differ diff --git a/factory-ai-vision/Installer/deployment.amd64.json b/factory-ai-vision/Installer/deployment.amd64.json index 3600028c0..54a71219c 100644 --- a/factory-ai-vision/Installer/deployment.amd64.json +++ b/factory-ai-vision/Installer/deployment.amd64.json @@ -37,7 +37,7 @@ "status": "running", "restartPolicy": "always", "settings": { - "image": "intelligentedge/nginxmodule:0.1.34-amd64", + "image": "intelligentedge/nginxmodule:0.1.35-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"8181/tcp\":[{\"HostPort\":\"8181\"}]}}}" } }, @@ -67,31 +67,10 @@ } }, "settings": { - "image": "intelligentedge/webmodule:0.1.34-amd64", + "image": "intelligentedge/webmodule:0.1.35-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"8000/tcp\":[{\"HostPort\":\"8000\"}]}}}" } }, - "WebDBModule": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "env": { - "POSTGRES_PASSWORD": { - "value": "vision_on_edge" - }, - 
"POSTGRES_USER": { - "value": "vision_on_edge" - }, - "POSTGRES_DB": { - "value": "vision_on_edge" - } - }, - "settings": { - "image": "intelligentedge/webdbmodule:0.1.34-amd64", - "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"5432/tcp\":[{\"HostPort\":\"5432\"}]}}}" - } - }, "InferenceModule": { "version": "1.0", "type": "docker", @@ -103,7 +82,7 @@ } }, "settings": { - "image": "intelligentedge/inferencemodule:0.1.34-amd64", + "image": "intelligentedge/inferencemodule:0.1.35-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"5000/tcp\":[{\"HostPort\":\"5000\"}],\"44000/tcp\":[{\"HostPort\":\"44000\"}],\"5558/tcp\":[{\"HostPort\":\"5558\"}]},\"IpcMode\":\"host\",\"runtime\":\"\"}}" } }, @@ -123,7 +102,7 @@ "status": "running", "restartPolicy": "always", "settings": { - "image": "intelligentedge/rtspsimmodule:0.1.34-amd64", + "image": "intelligentedge/rtspsimmodule:0.1.35-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"554/tcp\":[{\"HostPort\":\"554\"}]}}}" } } diff --git a/factory-ai-vision/Installer/deployment.opencv.amd64.json b/factory-ai-vision/Installer/deployment.opencv.amd64.json index 8a330d959..c89a5711f 100644 --- a/factory-ai-vision/Installer/deployment.opencv.amd64.json +++ b/factory-ai-vision/Installer/deployment.opencv.amd64.json @@ -37,7 +37,7 @@ "status": "running", "restartPolicy": "always", "settings": { - "image": "intelligentedge/nginxmodule:0.1.34-amd64", + "image": "intelligentedge/nginxmodule:0.1.35-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"8181/tcp\":[{\"HostPort\":\"8181\"}]}}}" } }, @@ -67,31 +67,10 @@ } }, "settings": { - "image": "intelligentedge/webmodule:0.1.34-amd64", + "image": "intelligentedge/webmodule:0.1.35-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"8000/tcp\":[{\"HostPort\":\"8000\"}]}}}" } }, - "WebDBModule": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "env": { - "POSTGRES_PASSWORD": { - "value": 
"vision_on_edge" - }, - "POSTGRES_USER": { - "value": "vision_on_edge" - }, - "POSTGRES_DB": { - "value": "vision_on_edge" - } - }, - "settings": { - "image": "intelligentedge/webdbmodule:0.1.34-amd64", - "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"5432/tcp\":[{\"HostPort\":\"5432\"}]}}}" - } - }, "InferenceModule": { "version": "1.0", "type": "docker", @@ -106,7 +85,7 @@ } }, "settings": { - "image": "intelligentedge/inferencemodule:0.1.34-amd64", + "image": "intelligentedge/inferencemodule:0.1.35-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"5000/tcp\":[{\"HostPort\":\"5000\"}],\"44000/tcp\":[{\"HostPort\":\"44000\"}],\"5558/tcp\":[{\"HostPort\":\"5558\"}]},\"IpcMode\":\"host\",\"runtime\":\"\"}}" } }, @@ -116,7 +95,7 @@ "status": "running", "restartPolicy": "always", "settings": { - "image": "intelligentedge/cameramodule:0.1.34-amd64", + "image": "intelligentedge/cameramodule:0.1.35-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"9000/tcp\":[{\"HostPort\":\"9000\"}],\"5559/tcp\":[{\"HostPort\":\"5559\"}]},\"runtime\":\"runc\"}}" } }, @@ -126,7 +105,7 @@ "status": "running", "restartPolicy": "always", "settings": { - "image": "intelligentedge/rtspsimmodule:0.1.34-amd64", + "image": "intelligentedge/rtspsimmodule:0.1.35-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"554/tcp\":[{\"HostPort\":\"554\"}]}}}" } } diff --git a/factory-ai-vision/Installer/factory-ai-vision-install.sh b/factory-ai-vision/Installer/factory-ai-vision-install.sh index 236736f5d..7416789fa 100644 --- a/factory-ai-vision/Installer/factory-ai-vision-install.sh +++ b/factory-ai-vision/Installer/factory-ai-vision-install.sh @@ -384,15 +384,33 @@ if [ "$isCfg" != true ]; then fi - ################################ Check for GPU ########################################### - while true; do - read -p "Does your device have a GPU? 
(y or n): " -n 1 -r; echo - case $REPLY in - [Yy]* ) cpuGpu="gpu"; runtime="nvidia"; break;; - [Nn]* ) cpuGpu="cpu"; runtime="runc" ; break;; - * ) echo "Please answer yes or no.";; - esac + ################################ Check for Device ########################################### + #while true; do + # read -p "Does your device have a GPU? (y or n): " -n 1 -r; echo + # case $REPLY in + # [Yy]* ) cpuGpu="gpu"; runtime="nvidia"; break;; + # [Nn]* ) cpuGpu="cpu"; runtime="runc" ; break;; + # * ) echo "Please answer yes or no.";; + # esac + #done + PS3='Choose the number corresponding to the Azure Stack Edge device: ' + deviceOptions="cpu gpu vpu" + select cpuGpu in $deviceOptions + do + echo "you chose: " $cpuGpu + if [ "$cpuGpu" != "" ]; then + break + fi done + if [ "$cpuGpu" == "cpu" ]; then + runtime="runc" + fi + if [ "$cpuGpu" == "gpu" ]; then + runtime="nvidia" + fi + if [ "$cpuGpu" == "vpu" ]; then + runtime="runc" + fi fi #if [ $isCfg != true ]; then @@ -447,7 +465,7 @@ if [ "$isCfg" != true ]; then echo amsResourceGroup='"'$amsResourceGroup'"' >> $factoryaiConfigName echo amsTanantId='"'$amsTenantId'"' >> $factoryaiConfigName echo amsServicePrincipalName='"'$amsServicePrincipalName'"' >> $factoryaiConfigName - echo amsServicePrincipalSecret='"'$amsServicePrincipalName'"' >> $factoryaiConfigName + echo amsServicePrincipalSecret='"'$amsServicePrincipalSecret'"' >> $factoryaiConfigName echo edgeDeviceId='"'$edgeDeviceId'"' >> $factoryaiConfigName fi diff --git a/factory-ai-vision/Readme.md b/factory-ai-vision/Readme.md index 9e7b88ede..be4d85dce 100644 --- a/factory-ai-vision/Readme.md +++ b/factory-ai-vision/Readme.md @@ -57,10 +57,10 @@ Check out the architecture below to see how Vision on Edge works on both LVA and # Architecture ### LVA Module (Recommended) -![arch_img](https://github.com/linkernetworks/azure-intelligent-edge-patterns/raw/develop/factory-ai-vision/assets/lva.png) 
+![arch_img](https://github.com/linkernetworks/azure-intelligent-edge-patterns/raw/develop/factory-ai-vision/assets/LVA_nginx.png) ### OpenCV Module -![arch_img](https://github.com/linkernetworks/azure-intelligent-edge-patterns/raw/develop/factory-ai-vision/assets/opencv.png) +![arch_img](https://github.com/linkernetworks/azure-intelligent-edge-patterns/raw/develop/factory-ai-vision/assets/opencv_nginx.png) diff --git a/factory-ai-vision/assets/LVA_nginx.png b/factory-ai-vision/assets/LVA_nginx.png new file mode 100644 index 000000000..1de25d435 Binary files /dev/null and b/factory-ai-vision/assets/LVA_nginx.png differ diff --git a/factory-ai-vision/assets/factory AI diagram.pptx b/factory-ai-vision/assets/factory AI diagram.pptx new file mode 100644 index 000000000..1836433a1 Binary files /dev/null and b/factory-ai-vision/assets/factory AI diagram.pptx differ diff --git a/factory-ai-vision/assets/opencv_nginx.png b/factory-ai-vision/assets/opencv_nginx.png new file mode 100644 index 000000000..2d9801703 Binary files /dev/null and b/factory-ai-vision/assets/opencv_nginx.png differ