diff --git a/factory-ai-vision/EdgeSolution/deployment.cpu.opencv.backend_only.template.json b/factory-ai-vision/EdgeSolution/deployment.cpu.opencv.backend_only.template.json index e60009604..83c7dff16 100644 --- a/factory-ai-vision/EdgeSolution/deployment.cpu.opencv.backend_only.template.json +++ b/factory-ai-vision/EdgeSolution/deployment.cpu.opencv.backend_only.template.json @@ -67,7 +67,7 @@ "createOptions": { "HostConfig": { "PortBindings": { - "8080/tcp": [ + "8181/tcp": [ { "HostPort": "8080" } @@ -234,4 +234,4 @@ "properties.desired": {} } } -} +} \ No newline at end of file diff --git a/factory-ai-vision/EdgeSolution/deployment.vpu.template.json b/factory-ai-vision/EdgeSolution/deployment.vpu.template.json new file mode 100644 index 000000000..008966bc7 --- /dev/null +++ b/factory-ai-vision/EdgeSolution/deployment.vpu.template.json @@ -0,0 +1,307 @@ +{ + "$schema-template": "2.0.0", + "modulesContent": { + "$edgeAgent": { + "properties.desired": { + "schemaVersion": "1.0", + "runtime": { + "type": "docker", + "settings": { + "minDockerVersion": "v1.25", + "loggingOptions": "", + "registryCredentials": { + "intelcustomvision": { + "username": "$CONTAINER_REGISTRY_USERNAME", + "password": "$CONTAINER_REGISTRY_PASSWORD", + "address": "$CONTAINER_REGISTRY_NAME" + } + } + } + }, + "systemModules": { + "edgeAgent": { + "type": "docker", + "settings": { + "image": "mcr.microsoft.com/azureiotedge-agent:1.0.9.5", + "createOptions": {} + } + }, + "edgeHub": { + "type": "docker", + "status": "running", + "restartPolicy": "always", + "settings": { + "image": "mcr.microsoft.com/azureiotedge-hub:1.0", + "createOptions": { + "HostConfig": { + "PortBindings": { + "5671/tcp": [ + { + "HostPort": "5671" + } + ], + "8883/tcp": [ + { + "HostPort": "8883" + } + ], + "443/tcp": [ + { + "HostPort": "443" + } + ] + } + } + } + } + } + }, + "modules": { + "NginxModule": { + "version": "1.0", + "type": "docker", + "status": "running", + "restartPolicy": "always", + "settings": { + 
"image": "${MODULES.NginxModule.amd64}", + "createOptions": { + "HostConfig": { + "LogConfig": { + "Type": "", + "Config": { + "max-size": "10m", + "max-file": "10" + } + }, + "PortBindings": { + "8181/tcp": [ + { + "HostPort": "8181" + } + ] + } + } + } + } + }, + "WebModule": { + "version": "1.0", + "type": "docker", + "status": "running", + "restartPolicy": "always", + "env": { + "TRAINING_KEY": { + "value": "$CUSTOM_VISION_TRAINING_KEY" + }, + "ENDPOINT": { + "value": "$CUSTOM_VISION_ENDPOINT" + }, + "DBNAME": { + "value": "vision_on_edge" + }, + "DBHOST": { + "value": "172.18.0.1" + }, + "DBUSER": { + "value": "vision_on_edge" + }, + "DBPASS": { + "value": "vision_on_edge" + }, + "DF_INFERENECE_IS_GPU": { "value": false } + }, + "settings": { + "image": "${MODULES.WebModule}", + "createOptions": { + "HostConfig": { + "LogConfig": { + "Type": "", + "Config": { + "max-size": "10m", + "max-file": "10" + } + }, + "PortBindings": { + "8000/tcp": [ + { + "HostPort": "8000" + } + ] + } + } + } + } + }, + "WebDBModule": { + "version": "1.0", + "type": "docker", + "status": "running", + "restartPolicy": "always", + "env": { + "POSTGRES_PASSWORD": { + "value": "vision_on_edge" + }, + "POSTGRES_USER": { + "value": "vision_on_edge" + }, + "POSTGRES_DB": { + "value": "vision_on_edge" + } + }, + "settings": { + "image": "${MODULES.WebDBModule}", + "createOptions": { + "HostConfig": { + "LogConfig": { + "Type": "", + "Config": { + "max-size": "10m", + "max-file": "10" + } + }, + "PortBindings": { + "5432/tcp": [ + { + "HostPort": "5432" + } + ] + } + } + } + } + }, + "InferenceModule": { + "version": "1.0", + "type": "docker", + "status": "running", + "restartPolicy": "always", + "env": { + "IOTHUB_CONNECTION_STRING": { + "value": "$IOTHUB_CONNECTION_STRING" + }, + "LVA_MODE": { + "value": "$LVA_MODE" + } + }, + "settings": { + "image": "${MODULES.InferenceModule.vpuamd64}", + "createOptions": { + "HostConfig": { + "LogConfig": { + "Type": "", + "Config": { + "max-size": 
"10m", + "max-file": "10" + } + }, + "PortBindings": { + "5000/tcp": [ + { + "HostPort": "5000" + } + ], + "5558/tcp": [ + { + "HostPort": "5558" + } + ], + "44000/tcp": [ + { + "HostPort": "44000" + } + ] + }, + "IpcMode": "host", + "runtime": "runc" + } + } + } + }, + "lvaEdge": { + "version": "1.0", + "type": "docker", + "status": "running", + "restartPolicy": "always", + "settings": { + "image": "mcr.microsoft.com/media/live-video-analytics:1", + "createOptions": { + "HostConfig": { + "LogConfig": { + "Type": "", + "Config": { + "max-size": "10m", + "max-file": "10" + } + }, + "IpcMode": "host" + } + } + } + }, + + "rtspsim": { + "version": "1.0", + "type": "docker", + "status": "running", + "restartPolicy": "always", + "settings": { + "image": "${MODULES.RtspSimModule}", + "createOptions": { + "HostConfig":{ + "LogConfig": { + "Type": "", + "Config": { + "max-size": "10m", + "max-file": "10" + } + }, + "PortBindings":{ + "554/tcp":[{ + "HostPort":"554" + }] + } + } + } + } + } + } + } + }, + "$edgeHub": { + "properties.desired": { + "schemaVersion": "1.0", + "routes": { + "metrics": "FROM /messages/modules/InferenceModule/outputs/metrics INTO $upstream", + "InferenceToLVA": "FROM /messages/modules/InferenceModule/outputs/InferenceToLVA INTO BrokeredEndpoint(\"/modules/lvaEdge/inputs/recordingTrigger\")" + }, + "storeAndForwardConfiguration": { + "timeToLiveSecs": 7200 + } + } + }, + "WebModule": { + "properties.desired": {} + }, + "VisionSampleModule": { + "properties.desired": {} + }, + "lvaEdge": { + "properties.desired": { + "applicationDataDirectory": "/var/media", + "azureMediaServicesArmId": "/subscriptions/$SUBSCRIPTION_ID/resourcegroups/$RESOURCE_GROUP/providers/microsoft.media/mediaservices/$SERVICE_NAME", + "aadTenantId": "$TENANT_ID", + "aadServicePrincipalAppId": "$SERVICE_PRINCIPAL_APP_ID", + "aadServicePrincipalSecret": "$SERVICE_PRINCIPAL_SECRET", + "aadEndpoint": "https://login.microsoftonline.com", + "aadResourceId": 
"https://management.core.windows.net/", + "armEndpoint": "https://management.azure.com/", + "diagnosticsEventsOutputName": "AmsDiagnostics", + "operationalEventsOutputName": "AmsOperational", + "logLevel": "Information", + "logCategories": "Application,Events", + "allowUnsecuredEndpoints": true, + "telemetryOptOut": false + } + } + } +} diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.vpuamd64 b/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.vpuamd64 new file mode 100644 index 000000000..cb5394513 --- /dev/null +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/dockerfiles/Dockerfile.vpuamd64 @@ -0,0 +1,82 @@ +#FROM mcr.microsoft.com/azureml/onnxruntime:latest-openvino-myriad +FROM waitingkuo/onnxruntime-vpu + +WORKDIR /app + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + libffi-dev \ + libgl1-mesa-glx \ + libgtk2.0-dev \ + libssl-dev \ + unzip \ + && rm -rf /var/lib/apt/lists/* +# libgl1-mesa-glx: opencv2 libGL.so error workaround + +RUN apt-get update \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y locales \ + && sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen \ + && dpkg-reconfigure --frontend=noninteractive locales \ + && update-locale LANG=en_US.UTF-8 \ + && rm -rf /var/lib/apt/lists/* + +# TODO: Consider move env to the end of dockerfile +ENV LANG en_US.UTF-8 +ENV LC_ALL en_US.UTF-8 + +COPY requirements/base.txt ./requirements/base.txt +#ENV CONDA_ENV_NAME python38 +#RUN conda create --name python38 python=3.8.5 -y &&\ +# . 
activate python38 +#RUN [ "/bin/bash", "-c", "source activate python38 && pip install --upgrade pip"] +#RUN [ "/bin/bash", "-c", "source activate python38 && pip install -r requirements/base.txt --ignore-installed"] +#RUN [ "/bin/bash", "-c", "source activate python38 && pip install opencv-python onnxruntime" ] +RUN pip install --upgrade pip +RUN pip install -r requirements/base.txt --ignore-installed +RUN pip install opencv-python + +COPY coco_classes.txt ./ +COPY default_model default_model/ +COPY default_model_6parts default_model_6parts/ +COPY grpc_topology.json ./ +COPY http_topology.json ./ +COPY sample_video sample_video/ +COPY scenario_models scenario_models/ +RUN chmod 777 sample_video/video.mp4 +RUN chmod 777 default_model + +COPY api/__init__.py ./api/__init__.py +COPY api/models.py ./api/models.py +COPY arguments.py ./ +COPY config.py ./ +COPY exception_handler.py ./ +COPY extension_pb2.py ./ +COPY extension_pb2_grpc.py ./ +COPY http_inference_engine.py ./ +COPY img.png ./ +COPY inference_engine.py ./ +COPY inferencing_pb2.py ./ +COPY invoke.py ./ +COPY logging_conf/logging_config.py ./logging_conf/logging_config.py +COPY main.py ./ +COPY media_pb2.py ./ +COPY model_wrapper.py ./ +COPY object_detection.py ./ +COPY object_detection2.py ./ +COPY onnxruntime_predict.py ./ +COPY scenarios.py ./ +COPY server.py ./ +COPY shared_memory.py ./ +COPY sort.py ./ +COPY stream_manager.py ./ +COPY streams.py ./ +COPY tracker.py ./ +COPY utility.py ./ +EXPOSE 5558 +EXPOSE 5000 + +#COPY onnxruntime_openvino-1.5.2-cp38-cp38-linux_x86_64.whl ./ +#RUN [ "/bin/bash", "-c", "source activate python38 && pip install onnxruntime_openvino-1.5.2-cp38-cp38-linux_x86_64.whl"] + +#CMD [ "/bin/bash", "-c", "source activate python38 && python3 server.py -p 44000"] +CMD ["python", "server.py", "-p", "44000"] \ No newline at end of file diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/model_wrapper.py 
b/factory-ai-vision/EdgeSolution/modules/InferenceModule/model_wrapper.py index 205fa4d98..d0bd0992f 100644 --- a/factory-ai-vision/EdgeSolution/modules/InferenceModule/model_wrapper.py +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/model_wrapper.py @@ -57,9 +57,9 @@ def __init__(self, cam_type="video_file", model_dir="./default_model"): self.is_gpu = onnxruntime.get_device() == "GPU" if self.is_gpu: - self.max_frame_rate = GPU_MAX_FRAME_RATE + self.max_total_frame_rate = GPU_MAX_FRAME_RATE else: - self.max_frame_rate = CPU_MAX_FRAME_RATE + self.max_total_frame_rate = CPU_MAX_FRAME_RATE self.update_frame_rate_by_number_of_streams(1) def set_is_scenario(self, is_scenario): @@ -74,9 +74,13 @@ def get_detection_mode(self): else: return self.detection_mode + def set_max_total_frame_rate(self, fps): + self.max_total_frame_rate = fps + print("[INFO] set max total frame rate as", fps, flush=True) + def update_frame_rate_by_number_of_streams(self, number_of_streams): if number_of_streams > 0: - self.frame_rate = max(1, int(self.max_frame_rate / number_of_streams)) + self.frame_rate = max(1, int(self.max_total_frame_rate / number_of_streams)) print("[INFO] set frame rate as", self.frame_rate, flush=True) else: print( @@ -87,9 +91,12 @@ def update_frame_rate_by_number_of_streams(self, number_of_streams): def get_recommended_frame_rate(self, number_of_streams): if number_of_streams > 0: - return max(1, int(self.max_frame_rate / number_of_streams)) + return max(1, int(self.max_total_frame_rate / number_of_streams)) else: - return self.max_frame_rate + return self.max_total_frame_rate + + def get_recommended_total_frame_rate(self): + return self.max_total_frame_rate def set_frame_rate(self, frame_rate): self.frame_rate = frame_rate diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/module.json b/factory-ai-vision/EdgeSolution/modules/InferenceModule/module.json index 431f62a61..8cc6eb4aa 100644 --- 
a/factory-ai-vision/EdgeSolution/modules/InferenceModule/module.json +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/module.json @@ -4,10 +4,11 @@ "image": { "repository": "${CONTAINER_REGISTRY_NAME}/intelligentedge/inferencemodule", "tag": { - "version": "0.1.547", + "version": "0.1.549", "platforms": { "cpuamd64": "./dockerfiles/Dockerfile.cpuamd64", "gpuamd64": "./dockerfiles/Dockerfile.gpuamd64", + "vpuamd64": "./dockerfiles/Dockerfile.vpuamd64", "gpuarm64v8": "./dockerfiles/Dockerfile.gpuarm64v8", "cpuarm64v8": "./dockerfiles/Dockerfile.cpuarm64v8", "amd64": "./dockerfiles/Dockerfile.amd64", @@ -23,4 +24,4 @@ "contextPath": "./" }, "language": "python" -} \ No newline at end of file +} diff --git a/factory-ai-vision/EdgeSolution/modules/InferenceModule/server.py b/factory-ai-vision/EdgeSolution/modules/InferenceModule/server.py index cae186985..e72c49506 100644 --- a/factory-ai-vision/EdgeSolution/modules/InferenceModule/server.py +++ b/factory-ai-vision/EdgeSolution/modules/InferenceModule/server.py @@ -227,13 +227,12 @@ def update_cams(request_body: CamerasModel): Cameras not in List should not inferecence. """ logger.info(request_body) - fps = request_body.fps + frame_rate = request_body.fps stream_manager.update_streams([cam.id for cam in request_body.cameras]) n = stream_manager.get_streams_num_danger() # frame_rate = onnx.update_frame_rate_by_number_of_streams(n) # recommended_fps = onnx.get_recommended_frame_rate(n) - frame_rate = fps - onnx.set_frame_rate(fps) + onnx.set_frame_rate(frame_rate) logger.warning('update frame rate to {0}'.format(frame_rate)) # lva_mode @@ -383,7 +382,16 @@ def get_recommended_fps(number_of_cameras: int): Args: number_of_cameras (int): number_of_cameras """ - return onnx.get_recommended_frame_rate(number_of_cameras) + return {'fps': int(onnx.get_recommended_frame_rate(number_of_cameras))} + +@app.get("/get_recommended_total_fps") +def get_recommended_total_fps(): + """get_recommended_fps. 
+ + Args: + number_of_cameras (int): number_of_cameras + """ + return {'fps': int(onnx.get_recommended_total_frame_rate())} # @app.route("/get_current_fps") @@ -498,7 +506,7 @@ def benchmark(): SCENARIO1_MODEL = "scenario_models/1" n_threads = 3 - n_images = 100 + n_images = 30 logger.info("============= BenchMarking (Begin) ==================") logger.info("--- Settings ----") logger.info("%s threads", n_threads) @@ -533,10 +541,16 @@ def _f(): threads[i].join() t1 = time.time() # print(t1-t0) + + discount = 0.75 + max_total_frame_rate = discount * (n_images * n_threads) / (t1-t0) + logger.info("---- Overall ----") logger.info("Processing %s images in %s seconds", n_images * n_threads, t1 - t0) logger.info(" Avg: %s ms per image", (t1 - t0) / (n_images * n_threads) * 1000) + logger.info(" Recommended Total FPS: %s", max_total_frame_rate) logger.info("============= BenchMarking (End) ==================") + onnx.set_max_total_frame_rate(max_total_frame_rate) def cvcapture_url(): @@ -638,10 +652,12 @@ def main(): else: logging.config.dictConfig(logging_config.LOGGING_CONFIG_DEV) + benchmark() + logger.info("is_edge: %s", is_edge()) + if is_edge(): main() else: logger.info("Assume running at local development.") local_main() - # benchmark() diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/inference_modules/api/serializers.py b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/inference_modules/api/serializers.py index cfd1ef71e..347377530 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/inference_modules/api/serializers.py +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/inference_modules/api/serializers.py @@ -13,6 +13,8 @@ class InferenceModuleSerializer(serializers.ModelSerializer): """InferenceModuleSerializer""" + recommended_fps = serializers.IntegerField(required=False, read_only=True) + class Meta: model = InferenceModule fields = 
"__all__" diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/inference_modules/models.py b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/inference_modules/models.py index 06697d936..3e0556e7d 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/inference_modules/models.py +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/inference_modules/models.py @@ -3,6 +3,7 @@ import logging +import requests from django.db import models logger = logging.getLogger(__name__) @@ -15,5 +16,16 @@ class InferenceModule(models.Model): url = models.CharField(max_length=1000, unique=True) is_gpu = models.BooleanField(default=False) + def recommended_fps(self) -> int: + try: + response = requests.get("http://" + self.url + "/recommended_fps") + result = int(response.json()["fps"]) + except Exception: + logger.exception( + "Get recommended_fps from inference module failed. Fallback to default" + ) + result = 10 + return result + def __str__(self): return self.name diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/module.json b/factory-ai-vision/EdgeSolution/modules/WebModule/module.json index 43865155b..c030acda0 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/module.json +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/module.json @@ -4,7 +4,7 @@ "image": { "repository": "${CONTAINER_REGISTRY_NAME}/intelligentedge/webmodule", "tag": { - "version": "0.3.532", + "version": "0.3.533", "platforms": { "amd64": "./dockerfiles/Dockerfile.amd64", "amd64.backend_only": "./dockerfiles/Dockerfile.amd64.backend_only", diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/AddCameraPanel.tsx b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/AddCameraPanel.tsx index 7fc58f014..937f1aced 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/AddCameraPanel.tsx +++ 
b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/AddCameraPanel.tsx @@ -11,8 +11,9 @@ import { Link, } from '@fluentui/react'; import * as R from 'ramda'; -import { useDispatch, useSelector } from 'react-redux'; +import { connect, useDispatch } from 'react-redux'; import { createSelector } from '@reduxjs/toolkit'; +import { State } from 'RootStateType'; import { postCamera, putCamera } from '../store/cameraSlice'; import { selectAllLocations, getLocations, postLocation } from '../store/locationSlice'; @@ -23,7 +24,7 @@ export enum PanelMode { Update, } -type AddEditCameraPanelProps = { +type OwnProps = { isOpen: boolean; onDissmiss: () => void; mode: PanelMode; @@ -31,6 +32,10 @@ type AddEditCameraPanelProps = { cameraId?: number; }; +type AddEditCameraPanelProps = OwnProps & { + locationOptions: IDropdownOption[]; +}; + type FormData = { value: V; errMsg: string; @@ -55,24 +60,24 @@ const selectLocationOptions = createSelector(selectAllLocations, (locations) => })), ); -export const AddEditCameraPanel: React.FC = ({ +export const Component: React.FC = ({ isOpen, onDissmiss, mode, + locationOptions, initialValue = initialForm, cameraId, }) => { const [loading, setLoading] = useState(false); const [formData, setFormData] = useState
(initialValue); const [dialogHidden, setDialogHidden] = useState(true); - const locationOptions = useSelector(selectLocationOptions); const dispatch = useDispatch(); const validate = useCallback(() => { let hasError = false; Object.keys(formData).forEach((key) => { - if (!formData[key].value) { + if (!formData[key].value && formData[key].value !== 0) { setFormData(R.assocPath([key, 'errMsg'], `This field is required`)); hasError = true; } @@ -188,3 +193,12 @@ export const AddEditCameraPanel: React.FC = ({ ); }; + +const mapState = (state: State, ownProps: OwnProps): AddEditCameraPanelProps => { + return { + ...ownProps, + locationOptions: selectLocationOptions(state), + }; +}; + +export default connect(mapState)(Component); diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/CreateByNameDialog.tsx b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/CreateByNameDialog.tsx index 78e397021..d408be5d5 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/CreateByNameDialog.tsx +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/CreateByNameDialog.tsx @@ -45,7 +45,7 @@ export const CreateByNameDialog: React.FC = ({ onDismiss={onDismiss} > - + diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/Deployment.tsx b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/Deployment.tsx index 6295c6eb3..287b865fa 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/Deployment.tsx +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/Deployment.tsx @@ -52,7 +52,7 @@ import { EmptyAddIcon } from './EmptyAddIcon'; import { getTrainingProject } from '../store/trainingProjectSlice'; import { Insights } from './DeploymentInsights'; import { Instruction } from './Instruction'; -import { selectAllImages } from '../store/imageSlice'; +import { getImages, selectAllImages } from '../store/imageSlice'; import { 
initialProjectData } from '../store/project/projectReducer'; const { palette } = getTheme(); @@ -109,6 +109,9 @@ export const Deployment: React.FC = () => { useEffect(() => { dispatch(getTrainingProject(true)); + // The property `upload` would be changed after configure + // Re fetch the images to get the latest data + dispatch(getImages()); }, [dispatch]); useInterval( @@ -128,8 +131,8 @@ export const Deployment: React.FC = () => { dispatch(updateProjectData({ probThreshold: newValue }, false)); const saveProbThresholde = () => dispatch(updateProbThreshold()); - const updateModel = useCallback(() => { - dispatch(getConfigure(projectId)); + const updateModel = useCallback(async () => { + await dispatch(getConfigure(projectId)); }, [dispatch, projectId]); const commandBarItems: ICommandBarItemProps[] = useMemo(() => { @@ -196,14 +199,7 @@ export const Deployment: React.FC = () => { return ( <> - {!!newImagesCount && ( - - )} + @@ -278,6 +274,28 @@ export const Deployment: React.FC = () => { ); }; +// Extract this component so when every time the instruction being show, +// It will get the latest images +const UpdateModelInstruction = ({ newImagesCount, updateModel }) => { + const dispatch = useDispatch(); + + useEffect(() => { + dispatch(getImages()); + }, [dispatch]); + + if (newImagesCount) + return ( + + ); + + return null; +}; + type VideoAnnosControlsProps = { cameraId: number; }; diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/__tests__/AddCameraPanel.spec.tsx b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/__tests__/AddCameraPanel.spec.tsx new file mode 100644 index 000000000..a5f9d6253 --- /dev/null +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/components/__tests__/AddCameraPanel.spec.tsx @@ -0,0 +1,102 @@ +import userEvent from '@testing-library/user-event'; +import React from 'react'; +import { waitFor } from '@testing-library/react'; +import { IDropdownOption } from '@fluentui/react'; 
+import { render } from '../../testUtils/renderWithRedux'; +import { dummyFunction } from '../../utils/dummyFunction'; +import { PanelMode } from '../AddPartPanel'; +import { postCamera as mockPostCamera } from '../../store/cameraSlice'; +import { Component as AddCameraPanel } from '../AddCameraPanel'; +import { postLocation as mockPostLocation } from '../../store/locationSlice'; + +jest.mock('../../store/cameraSlice', () => ({ + ...jest.requireActual('../../store/cameraSlice'), + postCamera: jest.fn(), +})); + +jest.mock('../../store/locationSlice', () => ({ + ...jest.requireActual('../../store/locationSlice'), + getLocations: jest.fn().mockReturnValue({ type: 'test' }), + postLocation: jest.fn(), +})); + +test('should able to enter camera name, RTSP Url', () => { + const { getByLabelText } = render( + , + ); + + const testCamera = 'test camera'; + const testRtspUrl = 'rtsp://'; + + userEvent.type(getByLabelText(/camera name/i), testCamera); + userEvent.type(getByLabelText(/rtsp url/i), testRtspUrl); + + expect(getByLabelText(/camera name/i)).toHaveValue(testCamera); + expect(getByLabelText(/rtsp url/i)).toHaveValue(testRtspUrl); +}); + +test('should dispatch the right API in different mode', async () => { + (mockPostCamera as any).mockReturnValueOnce(dummyFunction); + const mockLocationOption: IDropdownOption = { + key: 0, + text: 'location1', + }; + const { getByLabelText, getByRole } = render( + , + ); + + const testCamera = 'test camera'; + const testRtspUrl = 'rtsp://'; + + userEvent.type(getByLabelText(/camera name/i), testCamera); + userEvent.type(getByLabelText(/rtsp url/i), testRtspUrl); + + // Pick a location + userEvent.click(getByRole('option')); + userEvent.click(getByRole('option', { name: mockLocationOption.text })); + + userEvent.click(getByRole('button', { name: /add/i })); + + await waitFor(() => { + expect(mockPostCamera).toHaveBeenCalledTimes(1); + expect(mockPostCamera).toHaveBeenCalledWith({ + name: testCamera, + rtsp: testRtspUrl, + 
location: mockLocationOption.key, + }); + }); +}); + +test('able to create a new location with the dialog', async () => { + const { getByTestId, getByRole, getByDisplayValue } = render( + , + ); + + userEvent.click(getByRole('button', { name: /create location/i })); + userEvent.type(getByTestId(/location-input/i), 'new location'); + + expect(getByDisplayValue('new location')).not.toBeNull(); +}); + +test('should dispatch right action when pressing create button', async () => { + const { getByRole, getByTestId } = render( + , + ); + (mockPostLocation as any).mockReturnValueOnce(() => Promise.resolve({ payload: { id: 0 } })); + + userEvent.click(getByRole('button', { name: /create location/i })); + + const mockLocationName = 'new location'; + userEvent.type(getByTestId(/location-input/i), mockLocationName); + userEvent.click(getByRole('button', { name: 'Create' })); + + await waitFor(() => { + expect(mockPostLocation).toHaveBeenCalledTimes(1); + expect(mockPostLocation).toHaveBeenCalledWith({ name: mockLocationName }); + }); +}); diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/pages/CameraDetails.tsx b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/pages/CameraDetails.tsx index 2c59f8c83..11b2eeca0 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/pages/CameraDetails.tsx +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/pages/CameraDetails.tsx @@ -22,7 +22,7 @@ import { useQuery } from '../hooks/useQuery'; import { selectCameraById, getCameras, deleteCamera } from '../store/cameraSlice'; import { RTSPVideo } from '../components/RTSPVideo'; import { thunkGetProject } from '../store/project/projectActions'; -import { AddEditCameraPanel, PanelMode } from '../components/AddCameraPanel'; +import AddCameraPanel, { PanelMode } from '../components/AddCameraPanel'; import { selectLocationById } from '../store/locationSlice'; import LabelingPage from '../components/LabelingPage/LabelingPage'; import { captureImage } 
from '../store/imageSlice'; @@ -115,7 +115,7 @@ export const CameraDetails: React.FC = () => { - { - const [isPanelOpen, setPanelOpen] = useState(false); + const [isPanelOpen, { setTrue: openPanel, setFalse: dismissPanel }] = useBoolean(false); const showInstruction = useSelector( (state: State) => state.camera.nonDemo.length > 0 && state.labelImages.ids.length === 0, ); - const dismissPanel = useConstCallback(() => setPanelOpen(false)); - const openPanel = useConstCallback(() => setPanelOpen(true)); - const commandBarItems: ICommandBarItemProps[] = useMemo( () => [ { @@ -50,7 +47,7 @@ export const Cameras: React.FC = () => { - + ); }; diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/store/__test__/project.spec.ts b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/store/__test__/project.spec.ts index 2d4a8b356..32ad0e686 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/store/__test__/project.spec.ts +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/store/__test__/project.spec.ts @@ -40,6 +40,8 @@ describe('Post project', () => { send_video_to_cloud: initialProjectData.sendVideoToCloud, inference_mode: initialProjectData.inferenceMode, fps: 10, + inference_protocol: 'grpc', + send_video_to_cloud: [], }, }); }); diff --git a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/store/project/projectActions.ts b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/store/project/projectActions.ts index 7172fa0a9..36c2f0013 100644 --- a/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/store/project/projectActions.ts +++ b/factory-ai-vision/EdgeSolution/modules/WebModule/ui/src/store/project/projectActions.ts @@ -183,7 +183,7 @@ export const thunkGetProject = (): ProjectThunk => (dispatch): Promise .then((results) => { const partDetection = results[0].data; const infModuleIdx = results[1].data.findIndex((e) => e.id === partDetection[0].inference_module); - const totalRecomendedFps = 
results[1].data[infModuleIdx]?.is_gpu ? 30 : 10; + const totalRecomendedFps = results[1].data[infModuleIdx]?.recommended_fps; const recomendedFps = Math.floor(totalRecomendedFps / (partDetection[0].cameras?.length || 1)); const project: ProjectData = { diff --git a/factory-ai-vision/Installer/acs.zip b/factory-ai-vision/Installer/acs.zip index 900c35214..1ad3f709b 100644 Binary files a/factory-ai-vision/Installer/acs.zip and b/factory-ai-vision/Installer/acs.zip differ diff --git a/factory-ai-vision/Installer/deployment.amd64.json b/factory-ai-vision/Installer/deployment.amd64.json index e32794461..3600028c0 100644 --- a/factory-ai-vision/Installer/deployment.amd64.json +++ b/factory-ai-vision/Installer/deployment.amd64.json @@ -37,7 +37,7 @@ "status": "running", "restartPolicy": "always", "settings": { - "image": "intelligentedge/nginxmodule:0.1.30-amd64", + "image": "intelligentedge/nginxmodule:0.1.34-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"8181/tcp\":[{\"HostPort\":\"8181\"}]}}}" } }, @@ -67,7 +67,7 @@ } }, "settings": { - "image": "intelligentedge/webmodule:0.1.30-amd64", + "image": "intelligentedge/webmodule:0.1.34-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"8000/tcp\":[{\"HostPort\":\"8000\"}]}}}" } }, @@ -88,7 +88,7 @@ } }, "settings": { - "image": "intelligentedge/webdbmodule:0.1.30-amd64", + "image": "intelligentedge/webdbmodule:0.1.34-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"5432/tcp\":[{\"HostPort\":\"5432\"}]}}}" } }, @@ -103,7 +103,7 @@ } }, "settings": { - "image": "intelligentedge/inferencemodule:0.1.30-amd64", + "image": "intelligentedge/inferencemodule:0.1.34-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"5000/tcp\":[{\"HostPort\":\"5000\"}],\"44000/tcp\":[{\"HostPort\":\"44000\"}],\"5558/tcp\":[{\"HostPort\":\"5558\"}]},\"IpcMode\":\"host\",\"runtime\":\"\"}}" } }, @@ -123,7 +123,7 @@ "status": "running", "restartPolicy": "always", "settings": { - "image": 
"intelligentedge/rtspsimmodule:0.1.30-amd64", + "image": "intelligentedge/rtspsimmodule:0.1.34-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"554/tcp\":[{\"HostPort\":\"554\"}]}}}" } } diff --git a/factory-ai-vision/Installer/deployment.opencv.amd64.json b/factory-ai-vision/Installer/deployment.opencv.amd64.json index 0943b8e68..8a330d959 100644 --- a/factory-ai-vision/Installer/deployment.opencv.amd64.json +++ b/factory-ai-vision/Installer/deployment.opencv.amd64.json @@ -37,7 +37,7 @@ "status": "running", "restartPolicy": "always", "settings": { - "image": "intelligentedge/nginxmodule:0.1.30-amd64", + "image": "intelligentedge/nginxmodule:0.1.34-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"8181/tcp\":[{\"HostPort\":\"8181\"}]}}}" } }, @@ -67,7 +67,7 @@ } }, "settings": { - "image": "intelligentedge/webmodule:0.1.30-amd64", + "image": "intelligentedge/webmodule:0.1.34-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"8000/tcp\":[{\"HostPort\":\"8000\"}]}}}" } }, @@ -88,7 +88,7 @@ } }, "settings": { - "image": "intelligentedge/webdbmodule:0.1.30-amd64", + "image": "intelligentedge/webdbmodule:0.1.34-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"5432/tcp\":[{\"HostPort\":\"5432\"}]}}}" } }, @@ -106,7 +106,7 @@ } }, "settings": { - "image": "intelligentedge/inferencemodule:0.1.30-amd64", + "image": "intelligentedge/inferencemodule:0.1.34-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"5000/tcp\":[{\"HostPort\":\"5000\"}],\"44000/tcp\":[{\"HostPort\":\"44000\"}],\"5558/tcp\":[{\"HostPort\":\"5558\"}]},\"IpcMode\":\"host\",\"runtime\":\"\"}}" } }, @@ -116,7 +116,7 @@ "status": "running", "restartPolicy": "always", "settings": { - "image": "intelligentedge/cameramodule:0.1.30-amd64", + "image": "intelligentedge/cameramodule:0.1.34-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"9000/tcp\":[{\"HostPort\":\"9000\"}],\"5559/tcp\":[{\"HostPort\":\"5559\"}]},\"runtime\":\"runc\"}}" 
} }, @@ -126,7 +126,7 @@ "status": "running", "restartPolicy": "always", "settings": { - "image": "intelligentedge/rtspsimmodule:0.1.30-amd64", + "image": "intelligentedge/rtspsimmodule:0.1.34-amd64", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"554/tcp\":[{\"HostPort\":\"554\"}]}}}" } } diff --git a/factory-ai-vision/Installer/factory-ai-vision-install.sh b/factory-ai-vision/Installer/factory-ai-vision-install.sh index ad0e1a9c9..236736f5d 100644 --- a/factory-ai-vision/Installer/factory-ai-vision-install.sh +++ b/factory-ai-vision/Installer/factory-ai-vision-install.sh @@ -14,6 +14,37 @@ edgeDeployJson=deploy.modules.json # the solution resource group name rgName=visiononedge-rg +now=`date +"%Y_%m_%d_%H_%M_%S"` + +if [ -d "factoryai_configs" ]; then + while true; do + read -p "Do you want to use the existing config files? (y or n): " -n 1 -r; echo + case $REPLY in + [Yy]* ) isCfg=true; break;; + [Nn]* ) isCfg=false; break;; + * ) echo "Please answer yes or no.";; + esac + done +fi + +if [ "$isCfg" = true ]; then + PS3='Choose the number corisponding to the Azure Stack Edge device: ' + configs=`ls factoryai_configs` + select opt in $configs + do + echo "you chose: " $opt + if [ "$opt" != "" ]; then + break + fi + done + + source factoryai_configs/$opt + echo Read from config ... + echo '################################################' + cat factoryai_configs/$opt + echo '################################################' +fi + # azSubscriptonName = The friendly name of the Azure subscription # iotHubName = The IoT Hub that corisponds to the ASE device @@ -74,291 +105,296 @@ echo Deleting conflict Extension #done #az account set --subscription "$azSubscriptonName" --only-show-errors -################################ Install Custom Vision ########################### - -echo You can use your existing Custom Vision service, or create a new one - while true; do - read -p "Would you like to use an existing Custom Vision Service? 
(y or n): " -n 1 -r; echo - case $REPLY in - [Yy]* ) read -p "Please enter your Custom Vision endpoint: " cvTrainingEndpoint; echo - read -p "Please enter your Custom Vision Key: " cvTrainingApiKey; echo - if [[ -z $cvTrainingEndpoint ]]; then - cvTrainingEndpoint='' - fi - if [[ -z $cvTrainingApiKey ]]; then - cvTrainingApiKey='' - fi - break;; - [Nn]* ) cvTrainingEndpoint=""; break;; - * ) echo "Please answer yes or no.";; - esac - done +if [ "$isCfg" != true ]; then -if [ "$cvTrainingEndpoint" == "" ]; then - echo Installing the Custom Vision Service - echo - loc=() - loc+=("eastus") - loc+=("westus2") - loc+=("southcentralus") - loc+=("northcentralus") - - PS3='Choose the location: ' - select opt in "${loc[@]}" - do - echo "you chose: " $opt - location=${opt%$CR} - break - done + ################################ Install Custom Vision ########################### - echo Creating resource group - $rgName - output=$(az group create -l $location -n $rgName) + echo You can use your existing Custom Vision service, or create a new one + while true; do + read -p "Would you like to use an existing Custom Vision Service? 
(y or n): " -n 1 -r; echo + case $REPLY in + [Yy]* ) read -p "Please enter your Custom Vision endpoint: " cvTrainingEndpoint; echo + read -p "Please enter your Custom Vision Key: " cvTrainingApiKey; echo + if [[ -z $cvTrainingEndpoint ]]; then + cvTrainingEndpoint='' + fi + if [[ -z $cvTrainingApiKey ]]; then + cvTrainingApiKey='' + fi + break;; + [Nn]* ) cvTrainingEndpoint=""; break;; + * ) echo "Please answer yes or no.";; + esac + done - echo Creating Custom Vision Service + if [ "$cvTrainingEndpoint" == "" ]; then + echo Installing the Custom Vision Service + echo + loc=() + loc+=("eastus") + loc+=("westus2") + loc+=("southcentralus") + loc+=("northcentralus") + + PS3='Choose the location: ' + select opt in "${loc[@]}" + do + echo "you chose: " $opt + location=${opt%$CR} + break + done - outputarrcv=() - # Need to note in the documentation that only one free service per subscription can be created. An existing one results in an error. - output="$(az deployment group create --resource-group $rgName --template-file $customVisionArm --query properties.outputs.*.value -o table --parameters "{ 'location': { 'value': '$location' } }")" - let cnt=0 - while read -r line - do - if [ $cnt -gt 1 ]; then - outputarrcv+=("$line") - fi - let cnt++ - done <<< "$output" - - # get length of an array - tLen=${#outputarrcv[@]} - - if [ $tLen -eq 0 ]; then - echo - echo Deployment failed. Please check if you already have a free version of Custom Vision installed. - read -p "Press key to exit..." - exit 1 - fi - - # the Custom Vision variables - cvTrainingApiKey=${outputarrcv[0]} - cvTrainingEndpoint=${outputarrcv[1]} - - echo API Key: $cvTrainingApiKey - echo Endpoint: $cvTrainingEndpoint -fi + echo Creating resource group - $rgName + output=$(az group create -l $location -n $rgName) + + echo Creating Custom Vision Service + outputarrcv=() + # Need to note in the documentation that only one free service per subscription can be created. An existing one results in an error. 
+ output="$(az deployment group create --resource-group $rgName --template-file $customVisionArm --query properties.outputs.*.value -o table --parameters "{ 'location': { 'value': '$location' } }")" + let cnt=0 + while read -r line + do + if [ $cnt -gt 1 ]; then + outputarrcv+=("$line") + fi + let cnt++ + done <<< "$output" + + # get length of an array + tLen=${#outputarrcv[@]} + + if [ $tLen -eq 0 ]; then + echo + echo Deployment failed. Please check if you already have a free version of Custom Vision installed. + read -p "Press key to exit..." + exit 1 + fi + + # the Custom Vision variables + cvTrainingApiKey=${outputarrcv[0]} + cvTrainingEndpoint=${outputarrcv[1]} + + echo API Key: $cvTrainingApiKey + echo Endpoint: $cvTrainingEndpoint + fi -# ############################## Get Streaming Type ##################################### -while true; do - read -p "Do you want to use Azure Live Video Analytics? (y or n): " -n 1 -r; echo - case $REPLY in - [Yy]* ) streaming="lva"; break;; - [Nn]* ) streaming="opencv"; break;; - * ) echo "Please answer yes or no.";; - esac -done -# ############################## Get Azure Media SErvice ##################################### + # ############################## Get Streaming Type ##################################### + while true; do + read -p "Do you want to use Azure Live Video Analytics? 
(y or n): " -n 1 -r; echo + case $REPLY in + [Yy]* ) streaming="lva"; break;; + [Nn]* ) streaming="opencv"; break;; + * ) echo "Please answer yes or no.";; + esac + done + + # ############################## Get Azure Media SErvice ##################################### + + if [ $streaming == "lva" ]; then + echo listing Azure Media Services + outputams=$(az ams account list --only-show-errors -o table --query [].name) + outputarrams=() + let cnt=0 + while read -r line + do + if [ $cnt -gt 1 ]; then + outputarrams+=("$line") + fi + let cnt++ + done <<< "$outputams" + + # get length of an array + tLen=${#outputarrams[@]} + + if [ $tLen -le 0 ]; then + echo Azure Media Services not found + echo Sorry, this demo requires that you have an existing Azure Media Services + echo Please following this documentation to create it first + echo https://docs.microsoft.com/en-us/azure/media-services/latest/create-account-howto?tabs=portal + read -p "Press key to exit..."; echo + exit 1 + fi + # Only one option so no need to prompt for choice + if [ $tLen -le 1 ]; then + while true; do + read -p "please confirm install to ${outputarrams[0]%$CR} ams (y or n): " -n 1 -r;echo + case $REPLY in + [Yy]* ) break;; + [Nn]* ) exit;; + * ) echo "Please answer yes or no.";; + esac + done + amsServiceName=${outputarrams[0]%$CR} + else + PS3='Choose the number corresponding to your Azure Medis Service ' + select opt in "${outputarrams[@]}" + do + echo "you chose: " $opt + amsServiceName=${opt%$CR} + break + done + fi + + amsResourceGroup=$(az ams account list --only-show-errors -o tsv --query '[].[name,resourceGroup]' | grep $amsServiceName | awk '{print $2}') + + amsServicePrincipalName=factoryai_$now + outputams=$(az ams account sp create --name $amsServicePrincipalName --account-name $amsServiceName --resource-group $amsResourceGroup --query '[SubscriptionId, AadTenantId, AadClientId, AadSecret]' -o tsv) + outputamsarr=() + while read -r line + do + outputamsarr+=("$line") + done <<< 
"$outputams" + + amsSubscriptionId=${outputamsarr[0]} + amsTenantId=${outputamsarr[1]} + amsServicePrincipalAppId=${outputamsarr[2]} + amsServicePrincipalSecret=${outputamsarr[3]} + + isAmsServicePrincipalCreated=True + if [[ $amsServicePrincipalSecret == Cannot* ]]; then + isAmsServicePrincipalCreated=False + echo "AMS Service Principal '$amsServicePrincipalName' exists" + echo "Please enter your Principal Secret for 'factoryai'" + read amsServicePrincipalSecret + fi + + if [[ $isAmsServicePrincipalCreated == True ]]; then + echo "New Azure Media Service Priniple '$amsServicePrincipalName' is created" + echo "***************************************************************************" + echo "*** Please copy your SERVICE_PRINCIPAL_SECRET, it cannot be shown again ***" + echo "***************************************************************************" + echo "============================================================" + echo "SUBSCRIPTION_ID :" $amsSubscriptionId + echo "RESOURCE_GROUP :" $amsResourceGroup + echo "TENANT_ID :" $amsTenantId + echo "SERVICE_NAME :" $amsServiceName + echo "SERVICE_PRINCIPAL_NAME :" $amsServicePrincipalName + echo "SERVICE_PRINCIPAL_APP_ID :" $amsServicePrincipalAppId + echo "SERVICE_PRINCIPAL_SECRET :" $amsServicePrincipalSecret + echo "============================================================" + read -p "Press any key to continue..." 
+ fi + + echo Azure Media Service Parameters: + echo "============================================================" + echo "SUBSCRIPTION_ID :" $amsSubscriptionId + echo "RESOURCE_GROUP :" $amsResourceGroup + echo "TENANT_ID :" $amsTenantId + echo "SERVICE_NAME :" $amsServiceName + echo "SERVICE_PRINCIPAL_NAME :" $amsServicePrincipalName + echo "SERVICE_PRINCIPAL_APP_ID :" $amsServicePrincipalAppId + echo "SERVICE_PRINCIPAL_SECRET :" $amsServicePrincipalSecret + echo "============================================================" + fi # if [ $streaming == "lva" ]; then -if [ $streaming == "lva" ]; then - echo listing Azure Media Services - outputams=$(az ams account list --only-show-errors -o table --query [].name) - outputarrams=() + # ############################## Get IoT Hub ##################################### + + echo listing IoT Hubs + outputhubs=$(az iot hub list --only-show-errors -o table --query [].name) + outputarrhubs=() let cnt=0 while read -r line do if [ $cnt -gt 1 ]; then - outputarrams+=("$line") + outputarrhubs+=("$line") fi let cnt++ - done <<< "$outputams" + done <<< "$outputhubs" # get length of an array - tLen=${#outputarrams[@]} + tLen=${#outputarrhubs[@]} if [ $tLen -le 0 ]; then - echo Azure Media Services not found - echo Sorry, this demo requires that you have an existing Azure Media Services - echo Please following this documentation to create it first - echo https://docs.microsoft.com/en-us/azure/media-services/latest/create-account-howto?tabs=portal + echo IoTHub not found + echo Sorry, this demo requires that you have an existing IoTHub and registered Azure Stack Edge Device read -p "Press key to exit..."; echo exit 1 fi # Only one option so no need to prompt for choice if [ $tLen -le 1 ]; then while true; do - read -p "please confirm install to ${outputarrams[0]%$CR} ams (y or n): " -n 1 -r;echo + read -p "please confirm install to ${outputarrhubs[0]%$CR} hub (y or n): " -n 1 -r;echo case $REPLY in [Yy]* ) break;; [Nn]* ) exit;; * ) 
echo "Please answer yes or no.";; esac done - amsServiceName=${outputarrams[0]%$CR} + iotHubName=${outputarrhubs[0]%$CR} else - PS3='Choose the number corresponding to your Azure Medis Service ' - select opt in "${outputarrams[@]}" + PS3='Choose the number corisponding to the IoTHub managing your target edge device: ' + select opt in "${outputarrhubs[@]}" do echo "you chose: " $opt - amsServiceName=${opt%$CR} + iotHubName=${opt%$CR} break done fi - amsResourceGroup=$(az ams account list --only-show-errors -o tsv --query '[].[name,resourceGroup]' | grep $amsServiceName | awk '{print $2}') + iotHubConnectionString=$(az iot hub connection-string show --hub-name $iotHubName -o tsv) + echo "Got IoTHub Connection: " $iotHubConnectionString - amsServicePrincipalName=factoryai - outputams=$(az ams account sp create --name $amsServicePrincipalName --account-name $amsServiceName --resource-group $amsResourceGroup --query '[SubscriptionId, AadTenantId, AadClientId, AadSecret]' -o tsv) - outputamsarr=() - while read -r line - do - outputamsarr+=("$line") - done <<< "$outputams" - - amsSubscriptionId=${outputamsarr[0]} - amsTenantId=${outputamsarr[1]} - amsServicePrincipalAppId=${outputamsarr[2]} - amsServicePrincipalSecret=${outputamsarr[3]} - - isAmsServicePrincipalCreated=True - if [[ $amsServicePrincipalSecret == Cannot* ]]; then - isAmsServicePrincipalCreated=False - echo "AMS Service Principal '$amsServicePrincipalName' exists" - echo "Please enter your Principal Secret for 'factoryai'" - read amsServicePrincipalSecret - fi - if [[ $isAmsServicePrincipalCreated == True ]]; then - echo "New Azure Media Service Priniple '$amsServicePrincipalName' is created" - echo "***************************************************************************" - echo "*** Please copy your SERVICE_PRINCIPAL_SECRET, it cannot be shown again ***" - echo "***************************************************************************" - echo 
"============================================================" - echo "SUBSCRIPTION_ID :" $amsSubscriptionId - echo "RESOURCE_GROUP :" $amsResourceGroup - echo "TENANT_ID :" $amsTenantId - echo "SERVICE_NAME :" $amsServiceName - echo "SERVICE_PRINCIPAL_NAME :" $amsServicePrincipalName - echo "SERVICE_PRINCIPAL_APP_ID :" $amsServicePrincipalAppId - echo "SERVICE_PRINCIPAL_SECRET :" $amsServicePrincipalSecret - echo "============================================================" - read -p "Press any key to continue..." - fi + # ############################## Get Device ##################################### - echo Azure Media Service Parameters: - echo "============================================================" - echo "SUBSCRIPTION_ID :" $amsSubscriptionId - echo "RESOURCE_GROUP :" $amsResourceGroup - echo "TENANT_ID :" $amsTenantId - echo "SERVICE_NAME :" $amsServiceName - echo "SERVICE_PRINCIPAL_NAME :" $amsServicePrincipalName - echo "SERVICE_PRINCIPAL_APP_ID :" $amsServicePrincipalAppId - echo "SERVICE_PRINCIPAL_SECRET :" $amsServicePrincipalSecret - echo "============================================================" -fi # if [ $streaming == "lva" ]; then - -# ############################## Get IoT Hub ##################################### - -echo listing IoT Hubs -outputhubs=$(az iot hub list --only-show-errors -o table --query [].name) -outputarrhubs=() -let cnt=0 -while read -r line -do - if [ $cnt -gt 1 ]; then - outputarrhubs+=("$line") - fi - let cnt++ -done <<< "$outputhubs" - -# get length of an array -tLen=${#outputarrhubs[@]} - -if [ $tLen -le 0 ]; then - echo IoTHub not found - echo Sorry, this demo requires that you have an existing IoTHub and registered Azure Stack Edge Device - read -p "Press key to exit..."; echo - exit 1 -fi -# Only one option so no need to prompt for choice -if [ $tLen -le 1 ]; then - while true; do - read -p "please confirm install to ${outputarrhubs[0]%$CR} hub (y or n): " -n 1 -r;echo - case $REPLY in - [Yy]* ) break;; - 
[Nn]* ) exit;; - * ) echo "Please answer yes or no.";; - esac - done - iotHubName=${outputarrhubs[0]%$CR} -else - PS3='Choose the number corisponding to the IoTHub managing your target edge device: ' - select opt in "${outputarrhubs[@]}" - do - echo "you chose: " $opt - iotHubName=${opt%$CR} - break - done -fi + echo getting devices + # query parameter retrieves only edge devices + output=$(az iot hub device-identity list -n $iotHubName -o table --query [?capabilities.iotEdge].[deviceId]) + let cnt=0 + outputarrdevs=() + while read -r line + do + # strip off column name and ------- + if [ $cnt -gt 1 ]; then + outputarrdevs+=("$line") + fi + let cnt++ + done <<< "$output" -iotHubConnectionString=$(az iot hub connection-string show --hub-name $iotHubName -o tsv) -echo "Got IoTHub Connection: " $iotHubConnectionString + # get length of an array + tLen=${#outputarrdevs[@]} + if [ $tLen -le 0 ]; then + echo No edge device found + echo Sorry, this demo requires that you have an existing IoTHub and registered Azure Stack Edge Device + read -p "Press any key to exit..."; echo + exit 1 + fi + # Only one option so no need to prompt for choice + if [ $tLen -le 1 ]; then + while true; do + read -p "please confirm install to ${outputarrdevs[0]%$CR} device (y or n): " -n 1 -r;echo + case $REPLY in + [Yy]* ) break;; + [Nn]* ) exit;; + * ) echo "Please answer yes or no.";; + esac + done + edgeDeviceId=${outputarrdevs[0]%$CR} + else + PS3='Choose the number corisponding to the Azure Stack Edge device: ' + select opt in "${outputarrdevs[@]}" + do + echo "you chose: " $opt + edgeDeviceId=${opt%$CR} + break + done + fi -# ############################## Get Device ##################################### -echo getting devices -# query parameter retrieves only edge devices -output=$(az iot hub device-identity list -n $iotHubName -o table --query [?capabilities.iotEdge].[deviceId]) -let cnt=0 -outputarrdevs=() -while read -r line -do - # strip off column name and ------- - if [ $cnt -gt 1 
]; then - outputarrdevs+=("$line") - fi - let cnt++ -done <<< "$output" - -# get length of an array -tLen=${#outputarrdevs[@]} - -if [ $tLen -le 0 ]; then - echo No edge device found - echo Sorry, this demo requires that you have an existing IoTHub and registered Azure Stack Edge Device - read -p "Press any key to exit..."; echo - exit 1 -fi -# Only one option so no need to prompt for choice -if [ $tLen -le 1 ]; then - while true; do - read -p "please confirm install to ${outputarrdevs[0]%$CR} device (y or n): " -n 1 -r;echo - case $REPLY in - [Yy]* ) break;; - [Nn]* ) exit;; - * ) echo "Please answer yes or no.";; - esac - done - edgeDeviceId=${outputarrdevs[0]%$CR} -else - PS3='Choose the number corisponding to the Azure Stack Edge device: ' - select opt in "${outputarrdevs[@]}" - do - echo "you chose: " $opt - edgeDeviceId=${opt%$CR} - break - done -fi + ################################ Check for GPU ########################################### + while true; do + read -p "Does your device have a GPU? (y or n): " -n 1 -r; echo + case $REPLY in + [Yy]* ) cpuGpu="gpu"; runtime="nvidia"; break;; + [Nn]* ) cpuGpu="cpu"; runtime="runc" ; break;; + * ) echo "Please answer yes or no.";; + esac + done -################################ Check for GPU ########################################### -while true; do - read -p "Does your device have a GPU? 
(y or n): " -n 1 -r; echo - case $REPLY in - [Yy]* ) cpuGpu="gpu"; runtime="nvidia"; break;; - [Nn]* ) cpuGpu="cpu"; runtime="runc" ; break;; - * ) echo "Please answer yes or no.";; - esac -done +fi #if [ $isCfg != true ]; then ################################ Check for Platform ########################################### #echo 1 amd64 @@ -369,7 +405,7 @@ done #else # edgeDeploymentJson=deployment.amd64.json #fi -if [ $streaming == "lva" ]; then +if [ "$streaming" == "lva" ]; then edgeDeploymentJson=deployment.amd64.json else edgeDeploymentJson=deployment.opencv.amd64.json @@ -396,11 +432,32 @@ do echo $prtline done < "$input" > ./$edgeDeployJson +if [ "$isCfg" != true ]; then + mkdir -p factoryai_configs + factoryaiConfigName=factoryai_configs/factoryai_"$edgeDeviceId"_"$cpuGpu"_"$streaming"_"$now".cfg + echo cvTrainingEndpoint='"'$cvTrainingEndpoint'"' >> $factoryaiConfigName + echo cvTrainingApiKey='"'$cvTrainingApiKey'"' >> $factoryaiConfigName + echo cpuGpu='"'$cpuGpu'"' >> $factoryaiConfigName + echo runtime='"'$runtime'"' >> $factoryaiConfigName + echo streaming='"'$streaming'"' >> $factoryaiConfigName + echo iotHubName='"'$iotHubName'"' >> $factoryaiConfigName + echo iotHubConnectionString='"'$iotHubConnectionString'"' >> $factoryaiConfigName + echo amsSubscriptionId='"'$amsSubscriptionId'"' >> $factoryaiConfigName + echo amsServiceName='"'$amsServiceName'"' >> $factoryaiConfigName + echo amsResourceGroup='"'$amsResourceGroup'"' >> $factoryaiConfigName + echo amsTanantId='"'$amsTenantId'"' >> $factoryaiConfigName + echo amsServicePrincipalName='"'$amsServicePrincipalName'"' >> $factoryaiConfigName + echo amsServicePrincipalSecret='"'$amsServicePrincipalName'"' >> $factoryaiConfigName + echo edgeDeviceId='"'$edgeDeviceId'"' >> $factoryaiConfigName +fi + + # ############################## Deploy Edge Modules ##################################### echo Deploying conatiners to Azure Stack Edge echo This will take more than 10 min at normal connection speeds. 
Status can be checked on the Azure Stack Edge device +#echo az iot edge set-modules --device-id $edgeDeviceId --hub-name $iotHubName --content $edgeDeployJson output=$(az iot edge set-modules --device-id $edgeDeviceId --hub-name $iotHubName --content $edgeDeployJson) echo "installation complete" diff --git a/factory-ai-vision/Readme.md b/factory-ai-vision/Readme.md index e8d1b8870..9e7b88ede 100644 --- a/factory-ai-vision/Readme.md +++ b/factory-ai-vision/Readme.md @@ -1,20 +1,27 @@ -| description | products | page_type | description | urlFragment | -| -------------------------- | --------------------------------------------------------------------- | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------- | -| This is an easy-to-use UI solution showing how to realize a your own machine learning solution concept in a single day without requiring any Machine Learning expertise, run with hardware accleration on edge with retraining loop.| - azure Stack
-Custom Vision
-Onnxruntime
-azure-iot-edge | sample solution | -json
-python
-javascript | custom-vision-azure-iot | +| description | products | page_type | description | +| -------------------------- | --------------------------------------------------------------------- | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| This is an easy-to-use UI solution showing how to realize a your own machine learning solution concept in a single day without requiring any Machine Learning expertise, run with hardware accleration on edge with retraining loop.| - azure Stack
-Custom Vision
-Onnxruntime
-azure-iot-edge | sample solution | -json
-python
-javascript | -# Custom vision + Azure IoT Edge for Factory AI - -This is a sample showing how to deploy a Custom Vision model to Azure IoT edge device and get Machine learning solution up and running in a single day. -You can define your location, camera and set up objects to detect example: any manufacturing parts, defected parts, etc. while keeping your video footage private, lowering your badnwidth costs and even running everything offline. We use onnxruntime to acclerate your models on edge device using Open Vino for CPU and TensorRT for Nvidia GPU. +# Vision on Edge Solution +This is a solution showing how to deploy a Custom Vision model to Azure IoT edge device and get Machine learning solution up and running in a single day. +You can define your location, camera and set up objects to detect example: any manufacturing parts, defected parts, etc. while keeping your video footage private, lowering your badnwidth costs and even running everything offline. We use onnxruntime to acclerate your models on edge device using Open Vino for CPU and TensorRT for Nvidia GPU and Arm64 GPU. This solution is capable of processing multiple cameras with Microsoft LVA and openCV. Check out [this video](https://lnkd.in/grQKBN8) to see brief introduction in action and understand how the value is delievered: [![video](https://github.com/Azure-Samples/azure-intelligent-edge-patterns/blob/master/factory-ai-vision/assets/Ignite.JPG)](https://lnkd.in/grQKBN8) +## Product +- azure Stack Edge: Learn more [here](https://azure.microsoft.com/en-us/products/azure-stack/edge/) +- Custom Visio: Learn more [here](https://azure.microsoft.com/en-us/services/cognitive-services/custom-vision-service/) +- Onnxruntime
+- Azure-iot-edge
+- OpenVINO/cpu
+- TensorRT for Nvidia/gpu
+- Arm64/cpu
@@ -35,7 +42,7 @@ Check out [this video](https://lnkd.in/grQKBN8) to see brief introduction in act # Prerequiste ## Hardware You need to have one of the following: -- **Azure Stack Edge**: A portfolio of devices that bring the compute, storage and intelligence to the edge right where data is created +- **Azure Stack Edge**: A portfolio of devices that bring the compute, storage and intelligence to the edge right where data is created. Find out more [here](https://azure.microsoft.com/en-us/products/azure-stack/edge/) - Please ensure that you have compute configured and you can run [GPU getting started module here](https://docs.microsoft.com/en-us/azure/databox-online/azure-stack-edge-gpu-deploy-sample-module-marketplace) or @@ -49,37 +56,26 @@ or Check out the architecture below to see how Vision on Edge works on both LVA and OpenCV module. You can also get more details through this tutorial to see how a IoT Edge deployment works. You must have the following services set up to use this solution: # Architecture -### LVA Module +### LVA Module (Recommended) ![arch_img](https://github.com/linkernetworks/azure-intelligent-edge-patterns/raw/develop/factory-ai-vision/assets/lva.png) ### OpenCV Module ![arch_img](https://github.com/linkernetworks/azure-intelligent-edge-patterns/raw/develop/factory-ai-vision/assets/opencv.png) -## Get Started - -To install the Vision on Edge Solution Accelerator, the following prerequisites are required: -1. You must have an Azure subscription. -
if you don’t have one, you can create one here: https://azure.microsoft.com/en-us/pricing/purchase-options/pay-as-you-go/ -2. That subscription must contain an IoT Hub with a registered IoT Edge device (generally this will be an Azure Stack Edge Device). -
At least one IoT Edge with port 8181 and 5000 opened and is connected to your Iot Hub. please follow this [documentation](https://docs.microsoft.com/en-us/azure/iot-edge/quickstart-linux) for deployment information -3. Azure Custom Vision account, see the below link to find your training key [here](https://www.customvision.ai/projects#/settings) -![arch_img](https://github.com/linkernetworks/azure-intelligent-edge-patterns/raw/develop/factory-ai-vision/assets/customvisioninfo.png) -4. Azure Media Service, please follow the document to create one https://docs.microsoft.com/en-us/azure/media-services/previous/media-services-portal-create-account#create-an-ams-account - -# Vision on Edge Installer +## Get Started: Vision on Edge Installer -## Option 1: Azure Shell Installer -### Get Started: +### Option 1: Azure Shell Installer +#### Get Started: Please refer to this tutorial to follow the [instruction](https://github.com/linkernetworks/azure-intelligent-edge-patterns/blob/develop/factory-ai-vision/Tutorial/Shell-installer-Tutorial.md) on how to install from Azure shell -## Option 2: Manual installation building a docker container and deploy by Visual Studio Code +### Option 2: Manual installation building a docker container and deploy by Visual Studio Code -### Prerequisites +#### Prerequisites Before installation, You must have the following services set up to use Vision on Edge: @@ -100,7 +96,7 @@ To learn more about this development environment, check out [this tutorial](http - Clone yadavm_factoryai_lpr branch ```bash - git clone https://github.com/Azure-Samples/azure-intelligent-edge-patterns.git --single-branch --branch yadavm_factoryai_lpr + git clone https://github.com/Azure-Samples/azure-intelligent-edge-patterns.git ``` - Go to factoryai directory and open your vscode @@ -168,7 +164,17 @@ To learn more about this development environment, check out [this tutorial](http ### Video Tutorial -- Getting started with VS code [https://youtu.be/ORTwMYOxkVs] +- 
Tutorial 0 - Build with VS code [https://youtu.be/ORTwMYOxkVs] + +- Tutorial 1: Azure Shell Installer [https://youtu.be/Z-iGND-Xtdg] + +- Tutorial 2 - Start with prebuilt scenario [https://youtu.be/dihAdZTGj-g] + +- Tutorial 3 - Start with creating new project, capture images, tagging images and deploy [https://youtu.be/Ut8UXHR2dCk] + +- Tutorial 4 - Retraining and improve your model [https://youtu.be/TsKvBerShbE] + +- Tutorial 5: Advance capabilities setting [https://youtu.be/GdgVJq2V0Io] # Privacy Notice diff --git a/factory-ai-vision/Tutorial/Shell-installer-Tutorial.md b/factory-ai-vision/Tutorial/Shell-installer-Tutorial.md index b67af78ed..5aba3e414 100644 --- a/factory-ai-vision/Tutorial/Shell-installer-Tutorial.md +++ b/factory-ai-vision/Tutorial/Shell-installer-Tutorial.md @@ -8,9 +8,9 @@ To install the Vision on Edge Solution Accelerator, the following prerequisites 1. You must have an Azure subscription.
if you don’t have one, you can create one here: https://azure.microsoft.com/en-us/pricing/purchase-options/pay-as-you-go/ -2. That subscription must contain an IoT Hub with a registered IoT Edge device (generally this will be an Azure Stack Edge Device). -
At least one IoT Edge with port 8080 and 5000 opened and is connected to your Iot Hub. please follow this [documentation](https://docs.microsoft.com/en-us/azure/iot-edge/quickstart-linux) for deployment information -3. Azure Custom Vision account, see the below link to find your training key [here](https://www.customvision.ai/projects#/settings) +2. That subscription must contain Azure Stack Edge or IoT hub Edge device with port 8181 opened. please follow this [documentation](https://docs.microsoft.com/en-us/azure/iot-edge/quickstart-linux) for deployment information +3. Azure Custom Vision account, see the below link to find your training key [here](https://www.customvision.ai/projects#/settings) and learn more [here](https://azure.microsoft.com/en-us/services/cognitive-services/custom-vision-service/) +![arch_img](https://github.com/linkernetworks/azure-intelligent-edge-patterns/raw/develop/factory-ai-vision/assets/customvisioninfo.png) 4. (Optional) Azure Media Service, please follow the document to create one https://docs.microsoft.com/en-us/azure/media-services/latest/create-account-howto?tabs=portal @@ -19,15 +19,16 @@ To install the Vision on Edge Solution Accelerator, the following prerequisites 1. Open your browser and paste the link https://shell.azure.com/ to open the shell installer. 2. You will need a Azure subscription to continue. Choose your Azure account. ![arch_img](https://github.com/linkernetworks/azure-intelligent-edge-patterns/raw/develop/factory-ai-vision/assets/step1.png) -3. To download acs.zip from github by putting the following command `wget -O acs.zip https://github.com/Azure-Samples/azure-intelligent-edge-patterns/raw/master/factory-ai-vision/Installer/acs.zip` +3. 
To download the installer (acs.zip) from GitHub, run the following command `wget -O acs.zip https://github.com/Azure-Samples/azure-intelligent-edge-patterns/raw/master/factory-ai-vision/Installer/acs.zip` ![arch_img](https://github.com/linkernetworks/azure-intelligent-edge-patterns/raw/develop/factory-ai-vision/assets/step2.png) -4. Unzip it `unzip -o acs.zip`. If you have downloaded before, the file name might be different with an extension. The file name can be found above if is different from acs.zip listed above. +4. Unzip it `unzip -o acs.zip`. ![arch_img](https://github.com/linkernetworks/azure-intelligent-edge-patterns/raw/develop/factory-ai-vision/assets/step3.png) 5. Execute the installer `bash factory-ai-vision-install.sh` 6. It will check the az command and check if it requires any installing/updating the IoT extension
You would be asked:
Would you like to use an existing Custom Vision Service? (y or n): y +
To learn more about Custom Vision Service, please refer to the link [here](https://azure.microsoft.com/en-us/services/cognitive-services/custom-vision-service/)
If you choose “yes”, you will be asked to input the endpoint and key.
Please enter your Custom Vision endpoint: xxxxxx
Please enter your Custom Vision Key: xxxxxx @@ -43,7 +44,7 @@ To install the Vision on Edge Solution Accelerator, the following prerequisites
And choose the number corresponding to your Azure Media Service. This is where you will be asked for the principle secret ![arch_img](https://github.com/linkernetworks/azure-intelligent-edge-patterns/raw/develop/factory-ai-vision/assets/step6.png) -9. Or if you don’t have one, but you would like to install with LVA, it will create new azure media service principle for you. +9. Or if you don’t have one, but you would like to install with LVA, please go ahead and create a [new account](https://docs.microsoft.com/en-us/azure/media-services/latest/create-account-howto?tabs=portal) and come back to continue the installer.
And please copy the "SERVICE-PRINCIPLE-SECRET" information at the bottom
You will need the secret information for later use ![arch_img](https://github.com/linkernetworks/azure-intelligent-edge-patterns/raw/develop/factory-ai-vision/assets/step7.png) @@ -57,5 +58,13 @@ To install the Vision on Edge Solution Accelerator, the following prerequisites ![arch_img](https://github.com/linkernetworks/azure-intelligent-edge-patterns/raw/develop/factory-ai-vision/assets/step9.png) 12. The installation will be started after. Please wait for couple minutes to complete the installation. -
Open your browser, connect to http://YOUR_IP:8080 +
You can check the deployment status on the [Azure portal](https://portal.azure.com/#home) +13. Open your browser, connect to http://YOUR_IP:8181 + +14. Check out our tutorials on our YouTube channel + +- Tutorial 2 - [Start with prebuilt scenario](https://youtu.be/dihAdZTGj-g) +- Tutorial 3 - [Start with creating new project, capture images, tagging images and deploy](https://youtu.be/Ut8UXHR2dCk) +- Tutorial 4 - [Retraining and improve your model](https://youtu.be/TsKvBerShbE) +- Tutorial 5: [Advance capabilities setting](https://youtu.be/GdgVJq2V0Io) diff --git a/factory-ai-vision/assets/step2.png b/factory-ai-vision/assets/step2.png index fe0019591..fb2a79130 100644 Binary files a/factory-ai-vision/assets/step2.png and b/factory-ai-vision/assets/step2.png differ diff --git a/factory-ai-vision/assets/step3.png b/factory-ai-vision/assets/step3.png index 0b79d5245..df0bc26b2 100644 Binary files a/factory-ai-vision/assets/step3.png and b/factory-ai-vision/assets/step3.png differ