diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index eb52e89cc..8343dafb3 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -61,6 +61,8 @@ jobs:
           device: AM64X
         - os: debian
           device: AM62LX
+        - os: edgeai
+          device: AM62AX
     steps:
     - name: Checkout
diff --git a/configs/AM62AX/AM62AX_edgeai_config.txt b/configs/AM62AX/AM62AX_edgeai_config.txt
new file mode 100644
index 000000000..823dc6dd1
--- /dev/null
+++ b/configs/AM62AX/AM62AX_edgeai_config.txt
@@ -0,0 +1,42 @@
+# General family replacement variables and configuration values for Linux build
+
+# This file is processed by Python scripts to define both replacement
+# variables and configuration values for a device family build.
+# For replacement variables, each variable should be listed on a single
+# line, and all the variables should be listed on consecutive lines
+# (i.e. no additional blank lines between the replacement variables).
+# Similarly, all the configuration values should be listed on individual,
+# consecutive lines (with no blank lines between the configuration values).
+#
+# Note that neither replacement variables nor configuration values may contain
+# a colon (i.e. ":") in their name. However, values may contain colons.
+
+Replacement Variables
+---------------------
+'__PART_FAMILY_NAME__' : 'AM62Ax'
+'__PART_FAMILY_DEVICE_NAMES__' : 'AM62Ax'
+'__PRODUCT_LINE_NAME__' : 'Sitara MPU'
+'__SDK_BUILD_MACHINE__' : 'am62a-sk'
+'__SDK_FULL_NAME__' : 'Processor SDK Linux Edge AI AM62Ax'
+'__SDK_SHORT_NAME__' : 'PSDK EdgeAI'
+'__SDK_INSTALL_FILE__' : 'ti-processor-sdk-linux-edgeai-am62a-evm--Linux-x86-Install.bin'
+'__SDK_INSTALL_DIR__' : 'ti-processor-sdk-linux-edgeai-am62a-evm-'
+'__SDK_DOWNLOAD_URL__' : '`Processor SDK AM62A Download Page `__'
+'__LINUX_UBUNTU_VERSION_LONG__' : '22.04 (64-bit)'
+'__LINUX_UBUNTU_VERSION_SHORT__' : '22.04'
+'__OPTEE_PLATFORM_FLAVOR__' : 'am62axx'
+'__RTOS_UBUNTU_VERSION_LONG__' : '22.04 (64-bit)'
+'__WINDOWS_SUPPORTED_LONG__' : '10 (64-bit)'
+'__FEATURINGMATRIX__' : \
+'__SYSFW_CORE_NAME__' : 'TIFS'
+
+Configuration Values
+--------------------
+'CONFIG_part_family' : 'AM62AX_family'
+'CONFIG_part_variant' : 'AM62AX'
+'CONFIG_sdk' : 'PLSDK'
+'CONFIG_icss_support' : 'yes'
+'CONFIG_rt_linux_support' : 'yes'
+'CONFIG_gpu_ip' : 'Rogue_AXE'
+'CONFIG_crypto' : 'sa2ul'
+
diff --git a/configs/AM62AX/AM62AX_edgeai_tags.py b/configs/AM62AX/AM62AX_edgeai_tags.py
new file mode 100644
index 000000000..a4f599039
--- /dev/null
+++ b/configs/AM62AX/AM62AX_edgeai_tags.py
@@ -0,0 +1,9 @@
+# Device Family name
+fam_name = 'AM62AX'
+
+# Project name and HTML title
+project = u'Processor SDK Linux Edge AI for AM62Ax'
+html_title = 'Processor SDK AM62Ax Documentation'
+
+# The master toctree document.
+master_doc = 'devices/AM62AX/edgeai/index' diff --git a/configs/AM62AX/AM62AX_edgeai_toc.txt b/configs/AM62AX/AM62AX_edgeai_toc.txt new file mode 100644 index 000000000..0fafc7276 --- /dev/null +++ b/configs/AM62AX/AM62AX_edgeai_toc.txt @@ -0,0 +1,18 @@ +devices/AM62AX/edgeai/datasheet_optiflow +devices/AM62AX/edgeai/datasheet_tiovx_apps +devices/AM62AX/edgeai/connectivity +devices/AM62AX/edgeai/pi_hdr_programming +devices/AM62AX/edgeai/faq +devices/AM62AX/edgeai/getting_started +devices/AM62AX/edgeai/index +devices/AM62AX/edgeai/test_report +devices/AM62AX/edgeai/release_notes + +edgeai/sdk_overview +edgeai/configuration_file +edgeai/edgeai_dataflows +edgeai/inference_models +edgeai/sample_apps +edgeai/measure_perf +edgeai/docker_environment +edgeai/sdk_components diff --git a/source/devices/AM62AX/edgeai/connectivity.rst b/source/devices/AM62AX/edgeai/connectivity.rst new file mode 100644 index 000000000..91f5b1fe2 --- /dev/null +++ b/source/devices/AM62AX/edgeai/connectivity.rst @@ -0,0 +1,8 @@ +============================ +Connectivity and Peripherals +============================ + +.. toctree:: + :maxdepth: 2 + + pi_hdr_programming \ No newline at end of file diff --git a/source/devices/AM62AX/edgeai/datasheet/optiflow/optiflow_camera_am62a.csv b/source/devices/AM62AX/edgeai/datasheet/optiflow/optiflow_camera_am62a.csv new file mode 100644 index 000000000..64f70c9e1 --- /dev/null +++ b/source/devices/AM62AX/edgeai/datasheet/optiflow/optiflow_camera_am62a.csv @@ -0,0 +1,5 @@ +Model Name,A53 Load (%),mcu1_0,C7x Load (%), VISS (%), LDC , BLNF, MSC_0 (%), MSC_1 (%), DOF , SDE , GPU , VISS (%), LDC , BLNF, MSC_0 (%), MSC_1 (%), DOF , SDE , GPU ,DDR Read BW (MB/s),DDR Read Peak BW (MB/s),DDR Write BW (MB/s),DDR Write Peak BW (MB/s),DDR Total BW (MB/s),DDR Total Peak BW (MB/s),temp_thermal_zone0(DDR),temp_thermal_zone1(CPU),temp_thermal_zone2(C7x),FPS +ONR-CL-6360-regNetx-200mf,11.57,2.99,15.42,18.17,0.00,0.00,26.71,13.89,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,760,760,336,336,1096,1096,53.91,46.79,47.92,33 +ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416,9.59,2.88,26.91,17.80,0.00,0.00,26.17,14.49,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,943,943,401,401,1344,1344,53.91,46.12,46.79,30 +TFL-CL-0000-mobileNetV1-mlperf,9.26,2.98,12.99,17.73,0.00,0.00,26.05,13.18,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,704,704,326,326,1030,1030,53.03,46.12,47.47,30 +TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320,7.56,2.90,40.47,17.86,0.00,0.00,26.26,13.94,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,941,941,399,399,1340,1340,53.91,47.02,47.02,30 diff --git a/source/devices/AM62AX/edgeai/datasheet/optiflow/optiflow_video_am62a.csv b/source/devices/AM62AX/edgeai/datasheet/optiflow/optiflow_video_am62a.csv new file mode 100644 index 000000000..ca2cab0db --- /dev/null +++ b/source/devices/AM62AX/edgeai/datasheet/optiflow/optiflow_video_am62a.csv @@ -0,0 +1,5 @@ +Model Name,A53 Load (%),mcu1_0,C7x Load (%), VISS (%), LDC , BLNF, MSC_0 (%), MSC_1 (%), DOF , SDE , GPU , VISS (%), LDC , BLNF, MSC_0 (%), MSC_1 (%), DOF , SDE , GPU ,DDR Read BW (MB/s),DDR Read Peak BW (MB/s),DDR Write BW (MB/s),DDR Write Peak BW (MB/s),DDR Total BW (MB/s),DDR Total Peak BW (MB/s),temp_thermal_zone0(DDR),temp_thermal_zone1(CPU),temp_thermal_zone2(C7x),FPS +ONR-CL-6360-regNetx-200mf,20.00,3.47,29.47,0.00,0.00,0.00,24.80,25.32,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1169,1169,490,490,1659,1659,52.81,45.21,46.12,60 
+ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416,15.71,2.93,54.93,0.00,0.00,0.00,25.17,23.31,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1560,1560,618,618,2178,2178,50.38,42.23,44.07,60
+TFL-CL-0000-mobileNetV1-mlperf,19.39,3.45,26.05,0.00,0.00,0.00,25.12,25.61,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1110,1110,486,486,1596,1596,52.59,45.66,46.79,60
+TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320,13.13,3.16,81.96,0.00,0.00,0.00,25.15,23.56,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1559,1559,617,617,2176,2176,52.81,44.30,46.12,60
diff --git a/source/devices/AM62AX/edgeai/datasheet/tiovx-apps/tiovx_apps_camera_am62a.csv b/source/devices/AM62AX/edgeai/datasheet/tiovx-apps/tiovx_apps_camera_am62a.csv
new file mode 100644
index 000000000..6c5393611
--- /dev/null
+++ b/source/devices/AM62AX/edgeai/datasheet/tiovx-apps/tiovx_apps_camera_am62a.csv
@@ -0,0 +1,5 @@
+Model Name,MPU Load (%),C7x_0 Load (%),VISS Load (%),MSC0 Load (%),MSC1 Load (%),DDR Read BW (MB/s),DDR Write BW (MB/s),DDR Total BW (MB/s),FPS,Inference time (ms)
+ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416,3.20,27.01,17.32,38.67,14.86,934,436,1370,30,8.895
+TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320,4.12,40.69,17.31,38.65,14.25,946,442,1388,30,13.434
+ONR-CL-6360-regNetx-200mf,4.34,14.70,17.32,38.62,13.53,697,354,1051,30,4.803
+TFL-CL-0000-mobileNetV1-mlperf,4.12,13.10,17.32,38.63,13.54,678,359,1037,30,4.269
diff --git a/source/devices/AM62AX/edgeai/datasheet/tiovx-apps/tiovx_apps_video_am62a.csv b/source/devices/AM62AX/edgeai/datasheet/tiovx-apps/tiovx_apps_video_am62a.csv
new file mode 100644
index 000000000..788c97101
--- /dev/null
+++ b/source/devices/AM62AX/edgeai/datasheet/tiovx-apps/tiovx_apps_video_am62a.csv
@@ -0,0 +1,5 @@
+Model Name,MPU Load (%),C7x_0 Load (%),MSC0 Load (%),MSC1 Load (%),DDR Read BW (MB/s),DDR Write BW (MB/s),DDR Total BW (MB/s),FPS,Inference time (ms)
+ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416,9.82,54.55,52.47,24.12,1546,743,2289,60,8.985
+TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320,7.28,82.35,49.82,23.77,1566,757,2323,60,13.618
+ONR-CL-6360-regNetx-200mf,5.65,29.29,65.78,26.15,1081,592,1673,60,4.788
+TFL-CL-0000-mobileNetV1-mlperf,8.58,26.08,65.46,26.14,1050,605,1655,60,4.251
diff --git a/source/devices/AM62AX/edgeai/datasheet_optiflow.rst b/source/devices/AM62AX/edgeai/datasheet_optiflow.rst
new file mode 100644
index 000000000..ef3757363
--- /dev/null
+++ b/source/devices/AM62AX/edgeai/datasheet_optiflow.rst
@@ -0,0 +1,50 @@
+====================
+Datasheet (Optiflow)
+====================
+
+The performance measurements include the following:
+
+#. **FPS** : Effective framerate at which the application runs
+#. **Total time** : Average time taken to process each frame, which includes
+   pre-processing, inference and post-processing time
+#. **Inference time** : Average time taken to infer each frame
+#. **CPU loading** : Loading on the different CPU cores present
+#. **DDR BW** : DDR read and write BW used
+#. **HWA Loading** : Loading on the different hardware accelerators present
+
+Following are the latest performance numbers of the OpTIFlow demos:
+
+Source : **Video**
+==============================
+
+| Resolution : **1280x768**
+| Encoding : **h264**
+|
+
+.. figure:: ../../../images/edgeai/datasheet_optiflow_pipeline1.png
+   :scale: 75
+   :align: center
+
+   GStreamer based data-flow pipeline with video file input source and display output
+
+.. csv-table::
+   :file: datasheet/optiflow/optiflow_video_am62a.csv
+   :header-rows: 1
+
+Source : **CSI Camera with VISS (imx219)**
+======================================================
+
+| Capture Framerate : **30 fps**
+| Resolution : **1920x1080**
+| Format : **SRGGB8**
+|
+
+.. figure:: ../../../images/edgeai/datasheet_optiflow_pipeline2.png
+   :scale: 75
+   :align: center
+
+   GStreamer based data-flow pipeline with IMX219 sensor, ISP and display
+
+.. csv-table::
+   :file: datasheet/optiflow/optiflow_camera_am62a.csv
+   :header-rows: 1
diff --git a/source/devices/AM62AX/edgeai/datasheet_tiovx_apps.rst b/source/devices/AM62AX/edgeai/datasheet_tiovx_apps.rst
new file mode 100644
index 000000000..be65681a2
--- /dev/null
+++ b/source/devices/AM62AX/edgeai/datasheet_tiovx_apps.rst
@@ -0,0 +1,38 @@
+======================
+Datasheet (TIOVX Apps)
+======================
+
+The performance measurements include the following:
+
+#. **FPS** : Effective framerate at which the application runs
+#. **Total time** : Average time taken to process each frame, which includes
+   pre-processing, inference and post-processing time
+#. **Inference time** : Average time taken to infer each frame
+#. **CPU loading** : Loading on the different CPU cores present
+#. **DDR BW** : DDR read and write BW used
+#. **HWA Loading** : Loading on the different hardware accelerators present
+
+Following are the latest performance numbers of the edgeai-tiovx-apps:
+
+Source : **Video**
+==============================
+
+| Resolution : **1280x768**
+| Encoding : **h264**
+|
+
+.. csv-table::
+   :file: datasheet/tiovx-apps/tiovx_apps_video_am62a.csv
+   :header-rows: 1
+
+Source : **CSI Camera with VISS (imx219)**
+======================================================
+
+| Capture Framerate : **30 fps**
+| Resolution : **1920x1080**
+| Format : **SRGGB8**
+|
+
+.. csv-table::
+   :file: datasheet/tiovx-apps/tiovx_apps_camera_am62a.csv
+   :header-rows: 1
diff --git a/source/devices/AM62AX/edgeai/faq.rst b/source/devices/AM62AX/edgeai/faq.rst
new file mode 100644
index 000000000..5e1a70998
--- /dev/null
+++ b/source/devices/AM62AX/edgeai/faq.rst
@@ -0,0 +1,53 @@
+.. _pub_edgeai_FAQs:
+
+====
+FAQs
+====
+
+.. _pub_edgeai_multiple_usb_cams:
+
+Getting an error when trying to capture from multiple USB cameras simultaneously
+==================================================================================
+
+This is a common issue faced in the industry with many USB cameras.
+You may get errors like ``Failed to allocate required memory.`` when trying to
+capture simultaneously from more than one USB camera.
+
+The root cause of this issue is that most USB cameras request more
+bandwidth (BW) than they actually require. If the use case is to capture from only
+2 USB cameras, it can be done by connecting one of them to the USB Type-C port,
+since it is internally connected to a separate instance of the USB controller.
+But if the use case is to capture from more than 2 cameras, you need to modify
+the UVC driver to override the BW allocation.
+
+The root cause and workaround are explained in detail in this
+blog: `Multiple UVC cameras on linux `_
+
+To apply the workaround to our SDK, use the steps below:
+
+#. Download and install `Processor SDK Linux `__
+#. Get the patch to add the `bandwidth_cap` parameter to the `uvcvideo` kernel module: `uvcvideo patch `_
+#. Apply the patch to the kernel source in the Processor SDK Linux install path. Please refer to `PSDK building kernel `_
+#. Compile only the `uvcvideo` module using the commands below
+
+   .. code-block:: bash
+
+      make ARCH=arm64 CROSS_COMPILE=aarch64-none-linux-gnu- am62ax_evm_a53_defconfig
+      make ARCH=arm64 CROSS_COMPILE=aarch64-none-linux-gnu- ./drivers/media/usb/uvc/uvcvideo.ko
+#. Copy `./drivers/media/usb/uvc/uvcvideo.ko` to the SK
+#. Remove the `uvcvideo` module and install the modified version using the commands below
+
+   .. code-block:: bash
+
+      rmmod uvcvideo
+      insmod uvcvideo.ko
+#. Set the desired BW cap as shown below
+
+   .. code-block:: bash
+
+      echo 1200 > /sys/module/uvcvideo/parameters/bandwidth_cap
+
+.. note::
+
+   The unit of BW here is Bytes/125us. You can estimate the approximate BW
+   requirement by multiplying the fps by the size per frame.
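+
+   As a worked example with illustrative numbers: a camera delivering
+   ~150 KB JPEG frames at 30 fps needs about 150000 x 30 = 4.5 MB/s.
+   There are 8000 125us USB microframes per second, so this works out to
+   4500000 / 8000 = ~563 Bytes/125us, and a cap of around 600 would be a
+   reasonable starting point.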
diff --git a/source/devices/AM62AX/edgeai/getting_started.rst b/source/devices/AM62AX/edgeai/getting_started.rst
new file mode 100644
index 000000000..bb4010910
--- /dev/null
+++ b/source/devices/AM62AX/edgeai/getting_started.rst
@@ -0,0 +1,327 @@
+.. _pub_edgeai_getting_started:
+
+===============
+Getting started
+===============
+
+.. _pub_edgeai_getting_started_hardware:
+
+Hardware setup
+==============
+
+.. _pub_edgeai_hw_requirements_eaik:
+
+|__PART_FAMILY_NAME__| EVM
+--------------------------
+
+|__PART_FAMILY_NAME__| EVM is a low-cost, small form factor board designed
+to bring smart cameras, robots and intelligent machines to life.
+For more information related to the board, the full list of peripherals supported,
+pin settings for boot modes and more,
+visit the |__PART_FAMILY_NAME__| EVM `User guide `_
+
+To run the demos on the |__PART_FAMILY_NAME__| EVM you will require:
+
+  - |__PART_FAMILY_NAME__| EVM
+  - Minimum 16GB high performance SD card
+  - External Power Supply or Power Accessory Requirements
+  - USB 2.0 mouse
+  - Full-HD (1080p) HDMI display
+  - Optional 100Base-T Ethernet cable connected to the internet
+  - Optional UART cable
+  - Optional USB camera (any V4L2 compliant 1MP/2MP camera, e.g. Logitech C270/C920/C922)
+
+  a. Nominal Output Voltage: 5-20VDC
+  b. Maximum Output Current: 5000 mA
+  c. Refer to the |__PART_FAMILY_NAME__| EVM `User guide `_
+     for more details.
+
+Connect the components to the EVM as shown in the image.
+
+.. figure:: ../../../images/edgeai/am62a_evm.jpg
+   :scale: 30
+   :align: center
+
+   |__PART_FAMILY_NAME__| EVM connections
+
+Set the boot pins to SD boot mode as shown in the following image.
+
+.. figure:: ../../../images/edgeai/am62a_bootpins.jpg
+   :scale: 20
+   :align: center
+
+   |__PART_FAMILY_NAME__| EVM boot pins
+
+.. _pub_edgeai_usb_camera:
+
+USB Camera
+----------
+
+UVC (USB video class) compliant USB cameras are supported by the |__PART_FAMILY_NAME__| EVM.
+The driver for the same is enabled in the SDK. The SDK has been tested with the
+C270/C920/C922 versions of Logitech USB cameras. Please refer to
+:ref:`pub_edgeai_multiple_usb_cams` to stream from multiple USB cameras
+simultaneously.
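+
+One quick way to check which formats and resolutions a connected USB camera
+actually supports is ``v4l2-ctl`` (a sketch, assuming the v4l-utils package is
+present on the target filesystem and using the device node printed by the init
+script):
+
+.. code-block:: bash
+
+   v4l2-ctl --device /dev/video-usb-cam0 --list-formats-ext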
+
+.. _pub_edgeai_imx219_sensor:
+
+RPiV2(IMX219) Raw sensor
+------------------------
+
+The **RPiV2 camera module** is supported by the |__PART_FAMILY_NAME__| EVM.
+It is an 8MP sensor with no ISP, which can transmit raw SRGGB8 frames over
+CSI lanes at 1080p resolution at 30 fps.
+
+For more details visit `Rpi camera module v2 `_
+
+.. note::
+
+   For trying different resolutions of the same sensor, we would also require
+   different DCC binary files to work with the tiovxisp and tiovxldc plugins.
+
+   Below is a patch posted upstream to enable other higher-fps, lower-resolution
+   modes:
+   https://patchwork.linuxtv.org/project/linux-media/patch/20210115185233.333407-1-angelogioacchino.delregno@somainline.org/
+
+The camera can be connected to the RPi header on |__PART_FAMILY_NAME__| as shown below.
+Note that the headers have to be lifted up to connect the cameras and put back
+firmly to keep the connector cable in place.
+
+.. figure:: ../../../images/edgeai/am62a_rpi.jpg
+   :scale: 15
+   :align: center
+
+   IMX219 CSI sensor connection with |__PART_FAMILY_NAME__| EVM
+
+.. note::
+
+   By default IMX219 is disabled. After connecting the camera you can enable it
+   by specifying the dtb overlay file in
+   ``/run/media/BOOT-mmcblk1p1/uEnv.txt`` as below:
+
+   ``name_overlays=ti/k3-am62x-sk-csi2-imx219.dtbo``
+
+   Reboot the board after editing and saving the file. Dtb files can be found
+   under /boot/dtb/ti.
+
+   The config file to run the analytics pipeline with the IMX219 camera is present under
+   ``/opt/edgeai-gst-apps/configs/imx219_cam_example.yaml``. Please refer to
+   :ref:`pub_edgeai_sample_apps` to use the file.
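+
+After the reboot, one quick way to confirm that the sensor actually probed is
+to check the kernel log (illustrative; the exact log text can differ between
+kernel versions):
+
+.. code-block:: bash
+
+   dmesg | grep -i imx219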
+
+Please refer to :ref:`pub_edgeai_camera_sources` to know how to list all the cameras
+connected and select which one to use for the demo.
+
+By default IMX219 will be configured to capture at 8 bit, but it also supports
+10 bit capture in a 16 bit container. To use it in 10 bit mode, the below steps are
+required:
+
+  - Call the setup_cameras.sh script, modifying the imx219 format.
+
+    .. code-block:: bash
+
+       export IMX219_CAM_FMT='[fmt:SRGGB10_1X10/1920x1080]'
+       /opt/edgeai-gst-apps/scripts/setup_cameras.sh
+
+  - Change the imaging binaries to use the 10 bit versions
+
+    .. code-block:: bash
+
+       mv /opt/imaging/imx219/linear/dcc_2a.bin /opt/imaging/imx219/linear/dcc_2a_8b_1920x1080.bin
+       mv /opt/imaging/imx219/linear/dcc_viss.bin /opt/imaging/imx219/linear/dcc_viss_8b_1920x1080.bin
+       mv /opt/imaging/imx219/linear/dcc_2a_10b_1920x1080.bin /opt/imaging/imx219/linear/dcc_2a.bin
+       mv /opt/imaging/imx219/linear/dcc_viss_10b_1920x1080.bin /opt/imaging/imx219/linear/dcc_viss.bin
+
+  - Set the input format in ``/opt/edgeai-gst-apps/configs/imx219_cam_example.yaml`` as ``rggb10``
+
+.. _pub_edgeai_fusion_mini:
+
+Mini Fusion
+-----------
+
+**Mini Fusion** is supported by the |__PART_FAMILY_NAME__| EVM.
+Mini Fusion can be used to connect FPDLink cameras to the EVM via CSI lanes.
+Some of the cameras supported are IMX219, IMX390 and OV2312.
+
+For more details visit `Arducam V3Link Camera Kit `_
+
+The interface between |__PART_FAMILY_NAME__|, the Mini Fusion board and the camera is shown below.
+Note that the headers have to be lifted up to connect the cameras and put back
+firmly to keep the connector cable in place.
+
+.. figure:: ../../../images/edgeai/am62a_mini_fusion.png
+   :scale: 80
+   :align: center
+
+   Fusion Mini connection with |__PART_FAMILY_NAME__| EVM
+
+.. note::
+
+   After connecting the camera you can enable it by specifying the dtb overlay
+   file in ``/run/media/BOOT-mmcblk1p1/uEnv.txt``. Let's take the example of a
+   single IMX219 camera connected to the Mini Fusion board at Port 0.
+
+   ``name_overlays=ti/k3-am62x-sk-csi2-v3link-fusion.dtbo ti/k3-v3link-imx219-0-0.dtbo``
+
+   Similarly, for a camera connected to another port, the corresponding dtbo can be appended above.
+   Reboot the board after editing and saving the file. Dtb files can be found
+   under /boot/dtb/ti.
+
+   Run the script to set up the cameras.
+
+   ``source /opt/edgeai-gst-apps/scripts/setup_cameras_v3link.sh``
+
+   The config file to run the analytics pipeline with the IMX219 camera is present under
+   ``/opt/edgeai-gst-apps/configs/imx219_cam_example.yaml``. Please refer to
+   :ref:`pub_edgeai_sample_apps` to use the file.
+
+
+Software setup
+==============
+
+.. _pub_edgeai_prepare_sd_card:
+
+Preparing SD card image
+-----------------------
+Download the latest `SD card .wic image `_
+and flash it to the SD card using the Balena Etcher tool available at:
+
+https://www.balena.io/etcher/
+
+.. note::
+
+   We have tested with Balena Etcher version 1.7.0, which can be found here:
+   https://github.com/balena-io/etcher/releases/tag/v1.7.0
+
+   There seems to be a known issue with the latest 1.7.2 version of Balena Etcher:
+   https://forums.balena.io/t/etcher-error-message-cannot-read-property-message-of-null/350471
+
+The tool can be installed on either Windows or Linux. Just download the
+Etcher image and follow the instructions to prepare the SD card.
+
+.. figure:: ../../../images/edgeai/balena_etcher.png
+   :scale: 100
+   :align: center
+
+   Balena Etcher tool to flash SD card with Processor SDK Linux Edge AI for |__PART_FAMILY_NAME__|
+
+The Etcher image is created for 16 GB SD cards. If you are using a larger SD card,
+the root filesystem will automatically expand to use the full SD card capacity
+after the first boot. In case it does not automatically expand, use the steps
+below on a **Linux PC**
+
+.. code-block:: bash
+
+   #find the SD card device entry using lsblk (Eg: /dev/sdc)
+   #use the following commands to expand the filesystem
+   #Make sure you have write permission to SD card or run the commands as root
+
+   #Unmount the BOOT and rootfs partition before using parted tool
+   umount /dev/sdX1
+   umount /dev/sdX2
+
+   #Use parted tool to resize the rootfs partition to use
+   #the entire remaining space on the SD card
+   #You might require sudo permissions to execute these steps
+   parted -s /dev/sdX resizepart 2 '100%'
+   e2fsck -f /dev/sdX2
+   resize2fs /dev/sdX2
+
+   #replace /dev/sdX in above commands with SD card device entry
+
+.. _pub_edgeai_poweron_boot:
+
+Power ON and Boot
+-----------------
+Ensure that the power supply is disconnected before inserting the SD card.
+Once the SD card is firmly inserted in its slot and the board is powered ON,
+the board will take less than 20 seconds to boot and start running the
+**Edge AI Gallery Application** as shown
+
+.. figure:: ../../../images/edgeai/am62a_oob_banner.jpg
+   :scale: 25
+   :align: center
+
+   |__PART_FAMILY_NAME__| EVM out-of-box UI demo showing wallpaper
+
+You can also view the boot log by connecting the UART cable to your PC and
+using a serial port communications program. 4 serial ports will come up on your
+PC; you should connect to port 0 to get the boot logs.
+
+For Linux OS, **minicom** works well.
+Please refer to the below documentation on 'minicom' for more details.
+
+https://help.ubuntu.com/community/Minicom
+
+When starting minicom, turn on the colors option like below:
+
+.. code-block:: bash
+
+   sudo minicom -D /dev/ttyUSB0 -c on
+
+.. note::
+
+   If you are unable to input from the keyboard, you might need to **disable Hardware
+   Flow Control in minicom**.
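+
+The baud rate can also be given directly on the minicom command line instead of
+through the settings menu (a sketch; 115200 8N1 is the rate expected by the EVM,
+as noted below):
+
+.. code-block:: bash
+
+   sudo minicom -D /dev/ttyUSB0 -b 115200 -c on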
+
+For Windows OS, **Tera Term** works well.
+Please refer to the below documentation on 'Tera Term' for more details:
+
+https://learn.sparkfun.com/tutorials/terminal-basics/tera-term-windows
+
+.. note::
+
+   If using a Windows computer, the user may need to install additional
+   drivers for the ports to show up: https://ftdichip.com/drivers/
+
+.. note::
+
+   The baud rate should be configured to 115200 bps in the serial port communication
+   program. You may not see any log in the UART console if you connect to it
+   after the booting is complete, or the login prompt may get lost in between boot
+   logs; press ENTER to get the login prompt.
+
+.. _pub_edgeai_getting_started_init_script:
+
+As part of Linux systemd, ``/opt/edgeai-gst-apps/init_script.sh`` is executed,
+which does the below:
+
+  - It sets up the necessary environment variables.
+  - If any camera is connected to the board, the script sets it up and prints its device id and other information.
+
+Once Linux boots, log in as the ``root`` user with no password.
+
+.. _pub_edgeai_connecting_remotely:
+
+Connect remotely
+----------------
+If you don't prefer the UART console, you can also access the device with the
+IP address that is shown on the display. With the IP address one can ssh directly
+to the board, view the contents and run the demos. For the best experience we recommend
+using VSCode, which can be downloaded from here:
+
+https://code.visualstudio.com/download
+
+You also require the "Remote development extension pack" installed in VSCode,
+as mentioned here:
+
+https://code.visualstudio.com/docs/remote/ssh
+
+.. figure:: ../../../images/edgeai/vs_code.png
+   :scale: 90
+   :align: center
+
+   Microsoft Visual Studio Code for connecting to |__PART_FAMILY_NAME__| EVM via SSH
+
+If you are using **Ubuntu 22.04**, add the following to **~/.ssh/config**
+
+.. code-block:: bash
+
+   # Add to ~/.ssh/config. Absolute IP or range of IP can be defined using *
+   Host 10.24.*
+       HostKeyAlgorithms=+ssh-rsa
+
+You can now SSH using the terminal.
+
+.. code-block:: bash
+
+   $ ssh root@10.24.69.123
diff --git a/source/devices/AM62AX/edgeai/index.rst b/source/devices/AM62AX/edgeai/index.rst
new file mode 100644
index 000000000..3c89b657c
--- /dev/null
+++ b/source/devices/AM62AX/edgeai/index.rst
@@ -0,0 +1,31 @@
+.. _edgeai-index:
+
+******************************************************
+Processor SDK Linux Edge AI Software Developer's Guide
+******************************************************
+
+.. rubric:: Welcome to the Processor SDK Linux Edge AI Software Developer's Guide for AM62Ax
+
+.. toctree::
+   :maxdepth: 5
+   :numbered:
+
+   /edgeai/sdk_overview
+   getting_started
+   /edgeai/sample_apps
+   /edgeai/inference_models
+   /edgeai/configuration_file
+   /edgeai/edgeai_dataflows
+   /edgeai/measure_perf
+   /edgeai/docker_environment
+   /edgeai/sdk_components
+   connectivity
+   datasheet_optiflow
+   datasheet_tiovx_apps
+   test_report
+   release_notes
+   faq
+
++--------------------------------+--------------------------------------------------------------------------------------------------+
+| .. Image:: /images/E2e.jpg     | For technical support please post your questions at `http://e2e.ti.com `__.                     |
++--------------------------------+--------------------------------------------------------------------------------------------------+
diff --git a/source/devices/AM62AX/edgeai/pi_hdr_programming.rst b/source/devices/AM62AX/edgeai/pi_hdr_programming.rst
new file mode 100644
index 000000000..d2c8e850e
--- /dev/null
+++ b/source/devices/AM62AX/edgeai/pi_hdr_programming.rst
@@ -0,0 +1,88 @@
+.. _pub_edgeai_pi_hdr_programming:
+
+=============================
+RPi 40-pin header programming
+=============================
+
+The TI AM62A SK development board contains a 40-pin GPIO
+header, similar to the 40-pin header on the Raspberry Pi.
+These GPIOs can be controlled for digital input and output using the Python/CPP
+libraries provided in the TI GPIO Library packages. The libraries have the same
+API as the RPi.GPIO library for the Raspberry Pi in order to provide an easy way
+to move applications running on the Raspberry Pi to the TI board.
+
+The TI GPIO Libraries are packaged under the ``/opt`` directory. Run the below script
+to build and install the gpio libraries
+
+.. code-block:: bash
+
+   /opt/edgeai-gst-apps# ./scripts/install_ti_gpio_libs.sh
+
+By default, the 40-pin header is not enabled on the AM62A SK board. It can be enabled by
+specifying the dtb overlay file ``k3-am62a7-sk-rpi-hdr-ehrpwm.dtbo`` in
+``/run/media/BOOT-mmcblk1p1/uEnv.txt`` as given below.
+
+``name_overlays=ti/k3-am62a7-sk-rpi-hdr-ehrpwm.dtbo``
+
+Reboot the board after editing and saving the file.
+
+.. _pub_edgeai_default_pin_setup:
+
+40-pin header default configuration
+===================================
+
+The default pin configuration on the SK board is as follows. Any deviation from this
+needs modifications to the Linux DTBO. The table below lists pin numbers in all three
+supported modes, namely BOARD, BCM, and SOC.
+
+| BOARD: **Physical Pin Number**
+| BCM : **Broadcom SOC Numbering**
+| SOC : **TI SOC Naming**
+
+.. figure:: ../../../images/edgeai/am62a-sk_exp_hdr.png
+   :class: float-right
+   :width: 650
+
+.. csv-table::
+   :header: "BOARD","BCM","SOC","Function"
+
+   "3", "2", "GPIO0_44", "I2C1 SDA"
+   "5", "3", "GPIO0_43", "I2C1 SCL"
+   "12", "18", "GPIO1_14", "HW PWM"
+   "13", "27", "GPIO0_42", "GPIO"
+   "15", "22", "GPIO1_22", "GPIO"
+   "16", "23", "GPIO0_38", "GPIO"
+   "18", "24", "GPIO0_39", "GPIO"
+   "22", "25", "GPIO0_14", "GPIO"
+   "29", "5", "GPIO0_36", "GPIO"
+   "31", "6", "GPIO0_33", "GPIO"
+   "32", "12", "GPIO0_40", "GPIO"
+   "33", "13", "GPIO1_10", "HW PWM"
+   "35", "19", "GPIO1_13", "HW PWM"
+   "36", "16", "GPIO1_09", "HW PWM"
+   "37", "26", "GPIO0_41", "GPIO"
+
+.. note::
+
+   Please refer to the `SK-AM62A User's Guide `_
+   for details on the expansion header pin names and functionality. Also refer to section
+   2.1.2 Power Budget Considerations for power/voltage limits on the expansion header pins.
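+
+As a quick smoke test of a header pin from the shell, the standard libgpiod
+tools can be used before moving to the libraries below (a sketch, assuming the
+gpiod utilities are present on the filesystem; the gpiochip/line mapping shown
+for the pin is an assumption and should first be verified with ``gpioinfo``):
+
+.. code-block:: bash
+
+   # Look up the line for e.g. GPIO0_42 (BOARD pin 13), then drive it high for 1s
+   gpioinfo | grep -i GPIO0_42
+   gpioset --mode=time --sec=1 gpiochip0 42=1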
+
+Repositories
+============
+The Python and CPP projects are hosted on the Texas Instruments GitHub. The links to the projects
+are given below. The details on installation and testing can be found in the respective project
+documentation.
+
+**Python Library**: https://github.com/TexasInstruments/ti-gpio-py.git
+
+**CPP Library**: https://github.com/TexasInstruments/ti-gpio-cpp.git
+
+The above repositories are cloned and installed during the initialization process upon initial boot.
+The ``gpiozero`` library is also installed as a part of the initialization.
+
+Additional References
+=====================
+Please refer to the link below for information on the 40-pin header numbering and naming conventions:
+
+| https://pinout.xyz/#
diff --git a/source/devices/AM62AX/edgeai/release_notes.rst b/source/devices/AM62AX/edgeai/release_notes.rst
new file mode 100644
index 000000000..5668ca6bd
--- /dev/null
+++ b/source/devices/AM62AX/edgeai/release_notes.rst
@@ -0,0 +1,70 @@
+==============
+Release notes
+==============
+
+.. _pub_edgeai_new_in_this_release:
+
+New in this Release
+===================
+
+ -
+
+Fixed in this Release
+=====================
+
+ - EDGEAI_ROBOTICS-1296 - DMABuf import not working with CnM Encoder for resolutions not aligned to 64 bytes
+ - EDGEAI_ROBOTICS-1295 - High CPU usage while running EdgeAI Demos
+ - EDGEAI_ROBOTICS-867 - v4l2h264dec Decoder not working for mp4 file
+ - EDGEAI_ROBOTICS-1258 - TIOVX apps: Buffer management needs to be optimized for v4l2 capture
+ - EDGEAI_ROBOTICS-1239 - TIOVX apps: V4L2 Decode does not exit cleanly in tiovx-apps
+ - EDGEAI_ROBOTICS-1200 - Device Agent: No error message for unsupported camera
+ - EDGEAI_ROBOTICS-1263 - AM62A TIDL: C7x goes into exception with MAIR7 MMU setting for L2 SRAM
+
+.. _pub_edgeai_known_issues:
+
+Known Issues
+============
+
+ - EDGEAI_ROBOTICS-1284 - TIOVX Apps: Jitter with using OpenVX buffers directly in OpenMAX wrapper
+ - EDGEAI_ROBOTICS-1266 - edgeai-tiovx-apps pipeline latency is the sum of individual components
+ - EDGEAI_ROBOTICS-1285 - edgeai-tiovx-apps: OMX_wrapper: QNX decode not working if buffers enqueued in default order
+
+.. _pub_edgeai_software_components:
+
+Software components
+===================
+
+List of software components used in this version
+
++------------------------------+---------------------+
+| Component                    | Version             |
++==============================+=====================+
+| Foundation Linux             | 10.01.00.08         |
++------------------------------+---------------------+
+| Python                       | 3.12.4              |
++------------------------------+---------------------+
+| OpenCV                       | 4.9.0               |
++------------------------------+---------------------+
+| GStreamer                    | 1.22.12             |
++------------------------------+---------------------+
+| Cmake                        | 3.28.3              |
++------------------------------+---------------------+
+| Ninja                        | 1.11.1              |
++------------------------------+---------------------+
+| Meson                        | 1.3.1               |
++------------------------------+---------------------+
+| NeoAI - DLR                  | 1.13.0              |
++------------------------------+---------------------+
+| Tensorflow                   | TIDL_PSDK_10.1      |
++------------------------------+---------------------+
+| TFLite-runtime               | TIDL_PSDK_10.1      |
++------------------------------+---------------------+
+| ONNX-runtime                 | TIDL_PSDK_10.1      |
++------------------------------+---------------------+
+| PyYAML                       | 6.0.1               |
++------------------------------+---------------------+
+| TI Model Zoo                 | 10.1.0              |
++------------------------------+---------------------+
+| edgeai-app-stack             | 10_01_00_00         |
++------------------------------+---------------------+
diff --git a/source/devices/AM62AX/edgeai/test_report.rst b/source/devices/AM62AX/edgeai/test_report.rst
new file mode 100644
index 000000000..bae5890f6
--- /dev/null
+++ b/source/devices/AM62AX/edgeai/test_report.rst
@@ -0,0 +1,192 @@
+===========
+Test Report
+===========
+
+Here is the summary of the sanity tests we ran with the Python, C++ and Optiflow demos.
+Test cases vary across different inputs, outputs, runtimes, models and Python/C++ apps.
+
+1. Inputs:
+
+   * USB Camera (Logitech C270, 1280x720, JPEG)
+   * IMX219 Camera (Rpi v2 Sony IMX219, 1920x1080, RAW)
+   * Image files (images under /opt/edgeai-test-data/images)
+   * Video file (video0_1280_768.h264 file under /opt/edgeai-test-data/videos)
+
+2. Outputs:
+
+   * Display (HDMI)
+   * File write to SD card
+
+3. Inference Type:
+
+   * Image classification
+   * Object detection
+   * Semantic segmentation
+
+4. Runtime/models:
+
+   * DLR
+   * TFLite
+   * ONNX
+
+5. Applications:
+
+   * Python
+   * C++
+   * Optiflow
+
+6.
Platform: + + * Host OS + * Docker + + +Apps test report +===================== + +Single Input Single Output +-------------------------- + +.. csv-table:: + :header: "Category", "# test case", "Pass", "Fail" + + Host OS - Python,126,126,0 + Host OS - C++,126,126,0 + Host OS - Optiflow,126,126,0 + Docker - Python,126,126,0 + Docker - C++,126,126,0 + Docker - Optiflow,126,126,0 + +.. csv-table:: + :header: "S.No", "Models", "Input", "Output", "Host OS - Optiflow", "Host OS - Python App", "Host OS - C++ App", "Docker - Optiflow", "Docker - Python App", "Docker - C++ App", "Comments" + + 1,TVM-CL-3090-mobileNetV2-tv,Image,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 2,TVM-CL-3090-mobileNetV2-tv,Image,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 3,TVM-CL-3090-mobileNetV2-tv,Image,Image-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 4,TVM-CL-3090-mobileNetV2-tv,Video,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 5,TVM-CL-3090-mobileNetV2-tv,Video,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 6,TVM-CL-3090-mobileNetV2-tv,USB Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 7,TVM-CL-3090-mobileNetV2-tv,USB Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 8,TVM-CL-3090-mobileNetV2-tv,IMX219 Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 9,TVM-CL-3090-mobileNetV2-tv,IMX219 Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 10,TFL-CL-0000-mobileNetV1-mlperf,Image,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 11,TFL-CL-0000-mobileNetV1-mlperf,Image,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 12,TFL-CL-0000-mobileNetV1-mlperf,Image,Image-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 13,TFL-CL-0000-mobileNetV1-mlperf,Video,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 14,TFL-CL-0000-mobileNetV1-mlperf,Video,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 15,TFL-CL-0000-mobileNetV1-mlperf,USB Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 16,TFL-CL-0000-mobileNetV1-mlperf,USB Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 17,TFL-CL-0000-mobileNetV1-mlperf,IMX219 Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 18,TFL-CL-0000-mobileNetV1-mlperf,IMX219 Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 19,ONR-CL-6360-regNetx-200mf,Image,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 20,ONR-CL-6360-regNetx-200mf,Image,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 21,ONR-CL-6360-regNetx-200mf,Image,Image-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 22,ONR-CL-6360-regNetx-200mf,Video,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 23,ONR-CL-6360-regNetx-200mf,Video,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 24,ONR-CL-6360-regNetx-200mf,USB Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 25,ONR-CL-6360-regNetx-200mf,USB Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 26,ONR-CL-6360-regNetx-200mf,IMX219 Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 27,ONR-CL-6360-regNetx-200mf,IMX219 Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 28,TVM-OD-5120-ssdLite-mobDet-DSP-coco-320x320,Image,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 29,TVM-OD-5120-ssdLite-mobDet-DSP-coco-320x320,Image,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 30,TVM-OD-5120-ssdLite-mobDet-DSP-coco-320x320,Image,Image-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 31,TVM-OD-5120-ssdLite-mobDet-DSP-coco-320x320,Video,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 32,TVM-OD-5120-ssdLite-mobDet-DSP-coco-320x320,Video,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 33,TVM-OD-5120-ssdLite-mobDet-DSP-coco-320x320,USB Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 
34,TVM-OD-5120-ssdLite-mobDet-DSP-coco-320x320,USB Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 35,TVM-OD-5120-ssdLite-mobDet-DSP-coco-320x320,IMX219 Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 36,TVM-OD-5120-ssdLite-mobDet-DSP-coco-320x320,IMX219 Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 37,TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320,Image,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 38,TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320,Image,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 39,TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320,Image,Image-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 40,TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320,Video,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 41,TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320,Video,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 42,TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320,USB Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 43,TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320,USB Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 44,TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320,IMX219 Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 45,TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320,IMX219 Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 46,ONR-OD-8020-ssd-lite-mobv2-mmdet-coco-512x512,Image,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 47,ONR-OD-8020-ssd-lite-mobv2-mmdet-coco-512x512,Image,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 48,ONR-OD-8020-ssd-lite-mobv2-mmdet-coco-512x512,Image,Image-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 49,ONR-OD-8020-ssd-lite-mobv2-mmdet-coco-512x512,Video,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 50,ONR-OD-8020-ssd-lite-mobv2-mmdet-coco-512x512,Video,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 51,ONR-OD-8020-ssd-lite-mobv2-mmdet-coco-512x512,USB Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 52,ONR-OD-8020-ssd-lite-mobv2-mmdet-coco-512x512,USB Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 53,ONR-OD-8020-ssd-lite-mobv2-mmdet-coco-512x512,IMX219 Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 54,ONR-OD-8020-ssd-lite-mobv2-mmdet-coco-512x512,IMX219 Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 55,ONR-OD-8220-yolox-s-lite-mmdet-coco-640x640,Image,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 56,ONR-OD-8220-yolox-s-lite-mmdet-coco-640x640,Image,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 57,ONR-OD-8220-yolox-s-lite-mmdet-coco-640x640,Image,Image-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 58,ONR-OD-8220-yolox-s-lite-mmdet-coco-640x640,Video,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 59,ONR-OD-8220-yolox-s-lite-mmdet-coco-640x640,Video,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 60,ONR-OD-8220-yolox-s-lite-mmdet-coco-640x640,USB Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 61,ONR-OD-8220-yolox-s-lite-mmdet-coco-640x640,USB Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 62,ONR-OD-8220-yolox-s-lite-mmdet-coco-640x640,IMX219 Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 63,ONR-OD-8220-yolox-s-lite-mmdet-coco-640x640,IMX219 Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 64,ONR-OD-8420-yolox-s-lite-mmdet-widerface-640x640,Image,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 65,ONR-OD-8420-yolox-s-lite-mmdet-widerface-640x640,Image,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 66,ONR-OD-8420-yolox-s-lite-mmdet-widerface-640x640,Image,Image-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 67,ONR-OD-8420-yolox-s-lite-mmdet-widerface-640x640,Video,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 
68,ONR-OD-8420-yolox-s-lite-mmdet-widerface-640x640,Video,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 69,ONR-OD-8420-yolox-s-lite-mmdet-widerface-640x640,USB Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 70,ONR-OD-8420-yolox-s-lite-mmdet-widerface-640x640,USB Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 71,ONR-OD-8420-yolox-s-lite-mmdet-widerface-640x640,IMX219 Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 72,ONR-OD-8420-yolox-s-lite-mmdet-widerface-640x640,IMX219 Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 73,ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416,Image,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 74,ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416,Image,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 75,ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416,Image,Image-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 76,ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416,Video,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 77,ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416,Video,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 78,ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416,USB Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 79,ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416,USB Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 80,ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416,IMX219 Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 81,ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416,IMX219 Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 82,ONR-OD-8270-yolox-pico-lite-mmdet-coco-320x320,Image,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 83,ONR-OD-8270-yolox-pico-lite-mmdet-coco-320x320,Image,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 84,ONR-OD-8270-yolox-pico-lite-mmdet-coco-320x320,Image,Image-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 85,ONR-OD-8270-yolox-pico-lite-mmdet-coco-320x320,Video,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 86,ONR-OD-8270-yolox-pico-lite-mmdet-coco-320x320,Video,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 87,ONR-OD-8270-yolox-pico-lite-mmdet-coco-320x320,USB Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 88,ONR-OD-8270-yolox-pico-lite-mmdet-coco-320x320,USB Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 89,ONR-OD-8270-yolox-pico-lite-mmdet-coco-320x320,IMX219 Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 90,ONR-OD-8270-yolox-pico-lite-mmdet-coco-320x320,IMX219 Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 91,TVM-SS-5710-deeplabv3lite-mobv2-cocoseg21-512x512,Image,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 92,TVM-SS-5710-deeplabv3lite-mobv2-cocoseg21-512x512,Image,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 93,TVM-SS-5710-deeplabv3lite-mobv2-cocoseg21-512x512,Image,Image-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 94,TVM-SS-5710-deeplabv3lite-mobv2-cocoseg21-512x512,Video,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 95,TVM-SS-5710-deeplabv3lite-mobv2-cocoseg21-512x512,Video,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 96,TVM-SS-5710-deeplabv3lite-mobv2-cocoseg21-512x512,USB Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 97,TVM-SS-5710-deeplabv3lite-mobv2-cocoseg21-512x512,USB Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 98,TVM-SS-5710-deeplabv3lite-mobv2-cocoseg21-512x512,IMX219 Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 99,TVM-SS-5710-deeplabv3lite-mobv2-cocoseg21-512x512,IMX219 Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 100,TFL-SS-2580-deeplabv3_mobv2-ade20k32-mlperf-512x512,Image,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 
101,TFL-SS-2580-deeplabv3_mobv2-ade20k32-mlperf-512x512,Image,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 102,TFL-SS-2580-deeplabv3_mobv2-ade20k32-mlperf-512x512,Image,Image-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 103,TFL-SS-2580-deeplabv3_mobv2-ade20k32-mlperf-512x512,Video,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 104,TFL-SS-2580-deeplabv3_mobv2-ade20k32-mlperf-512x512,Video,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 105,TFL-SS-2580-deeplabv3_mobv2-ade20k32-mlperf-512x512,USB Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 106,TFL-SS-2580-deeplabv3_mobv2-ade20k32-mlperf-512x512,USB Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 107,TFL-SS-2580-deeplabv3_mobv2-ade20k32-mlperf-512x512,IMX219 Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 108,TFL-SS-2580-deeplabv3_mobv2-ade20k32-mlperf-512x512,IMX219 Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 109,ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512,Image,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 110,ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512,Image,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 111,ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512,Image,Image-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 112,ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512,Video,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 113,ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512,Video,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 114,ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512,USB Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 115,ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512,USB Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 116,ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512,IMX219 Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 117,ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512,IMX219 Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 118,ONR-SS-7618-deeplabv3lite-mobv2-qat-robokit-768x432,Image,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 119,ONR-SS-7618-deeplabv3lite-mobv2-qat-robokit-768x432,Image,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 120,ONR-SS-7618-deeplabv3lite-mobv2-qat-robokit-768x432,Image,Image-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 121,ONR-SS-7618-deeplabv3lite-mobv2-qat-robokit-768x432,Video,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 122,ONR-SS-7618-deeplabv3lite-mobv2-qat-robokit-768x432,Video,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 123,ONR-SS-7618-deeplabv3lite-mobv2-qat-robokit-768x432,USB Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 124,ONR-SS-7618-deeplabv3lite-mobv2-qat-robokit-768x432,USB Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + 125,ONR-SS-7618-deeplabv3lite-mobv2-qat-robokit-768x432,IMX219 Camera,Display,Pass,Pass,Pass,Pass,Pass,Pass,- + 126,ONR-SS-7618-deeplabv3lite-mobv2-qat-robokit-768x432,IMX219 Camera,Video-Filewrite,Pass,Pass,Pass,Pass,Pass,Pass,- + +.. 
note::
+
+   Please refer to the :ref:`pub_edgeai_known_issues` section for more details.
diff --git a/source/devices/AM62AX/edgeai/version.txt b/source/devices/AM62AX/edgeai/version.txt
new file mode 100644
index 000000000..3caad1b12
--- /dev/null
+++ b/source/devices/AM62AX/edgeai/version.txt
@@ -0,0 +1 @@
+10_01_00
diff --git a/source/edgeai/Release_Specific/Processor_SDK_Supported_Platforms_and_Versions.rst.inc b/source/edgeai/Release_Specific/Processor_SDK_Supported_Platforms_and_Versions.rst.inc
new file mode 100644
index 000000000..7dc52e676
--- /dev/null
+++ b/source/edgeai/Release_Specific/Processor_SDK_Supported_Platforms_and_Versions.rst.inc
@@ -0,0 +1,37 @@
+.. _release-specific-supported-platforms-and-versions:
+
+************************************
+Supported Platforms and Versions
+************************************
+
+.. rubric:: Supported Host Operating Systems
+   :name: supported-host-operating-systems
+
+The following operating systems have been validated to work with our
+SDK.
+
+.. ifconfig:: CONFIG_sdk in ('PSDKL')
+
+   - **Linux SDK**
+
+     +---------------------------+-------------------------+
+     | **Operating System**      | **Version**             |
+     +---------------------------+-------------------------+
+     | Ubuntu                    | 22.04 (64-bit)          |
+     +---------------------------+-------------------------+
+
+|
+
+.. rubric:: Supported Platforms and EVMs
+   :name: supported-platforms-and-evms
+
+The following platforms and SKs are supported with the Processor SDK.
+
+.. ifconfig:: CONFIG_part_family in ('J7_family')
+
+   +--------------+-----------+-----------+-----------------------+-------------------+
+   | **Platform** | **EVM**   | **Tested  | **Document**          | **Processor SDK   |
+   |              |           | Version** |                       | Linux Automotive**|
+   +--------------+-----------+-----------+-----------------------+-------------------+
+   | J721e/DRA829 | J721e EVM | Alpha     | Hardware User's Guide | Y                 |
+   +--------------+-----------+-----------+-----------------------+-------------------+
diff --git a/source/edgeai/configuration_file.rst b/source/edgeai/configuration_file.rst
new file mode 100644
index 000000000..4a4ff5123
--- /dev/null
+++ b/source/edgeai/configuration_file.rst
@@ -0,0 +1,397 @@
+.. _pub_edgeai_configuration:
+
+========================
+Configuring applications
+========================
+
+The demo config file uses the YAML format to define input sources, models, outputs
+and finally the flows, which define how everything is connected. Config files
+for the out-of-box demos are kept in the ``edgeai-gst-apps/configs`` folder. The
+folder contains config files for all the use cases and also the multi-input and
+multi-inference case. The folder also has a template YAML file,
+``app_config_template.yaml``, which has a detailed explanation of all the
+parameters supported in the config file.
+
+The config file is divided into 4 sections:
+
+#. Inputs
+#. Models
+#. Outputs
+#. Flows
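+
+A config file is what gets passed to the demo applications at launch; for
+example (a sketch -- the exact entry points are covered in the sample apps
+section, and paths may vary by release):
+
+.. code-block:: bash
+
+   cd /opt/edgeai-gst-apps/apps_python
+   ./app_edgeai.py ../configs/object_detection.yaml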
+
+Inputs
+======
+
+The input section defines a list of supported inputs like cameras, video files etc.,
+and their properties, as shown below.
+
+.. code-block:: yaml
+
+   inputs:
+       input0:                              #Camera Input
+           source: /dev/video-usb-cam0      #Device file entry of the camera
+           format: jpeg                     #Input data format supported by camera
+           width: 1280                      #Width and Height of the input
+           height: 720
+           framerate: 30                    #Framerate of the source
+
+       input1:                              #Video Input
+           source: /opt/edgeai-test-data/videos/video0_1280_768.h264   #Video file
+           format: h264                     #File encoding format
+           width: 1280
+           height: 768
+           framerate: 30
+
+       input2:                              #Image Input
+           source: /opt/edgeai-test-data/images/%04d.jpg   #Sequence of Image files, printf style formatting is used
+           width: 1280
+           height: 720
+           index: 0                         #Starting Index (optional)
+           framerate: 1
+
+All supported inputs are listed in the template config file.
+Below are the details of the most commonly used inputs.
+
+.. _pub_edgeai_camera_sources:
+
+Camera sources (v4l2)
+---------------------
+
+The **v4l2src** GStreamer element is used to capture frames from camera sources
+which are exposed as v4l2 devices. In Linux, there are many devices which are
+implemented as v4l2 devices; not all of them will be camera devices. You need
+to make sure the correct device is configured for running the demo successfully.
+
+``init_script.sh`` is run as part of systemd; it detects all the cameras connected
+and prints the details like below in the console:
+
+.. code-block:: bash
+
+   /opt/edgeai-gst-apps# ./init_script.sh
+   USB Camera detected
+       device = /dev/video-usb-cam0
+       format = jpeg
+   CSI Camera 0 detected
+       device = /dev/video-imx219-cam0
+       format = [fmt:SRGGB8_1X8/1920x1080]
+       subdev_id = /dev/v4l-imx219-subdev0
+       isp_required = yes
+
+The script can also be run manually later to get the camera details.
+
+From the above log we can determine that 1 USB camera is connected
+(/dev/video-usb-cam0), and 1 CSI camera is connected (/dev/video-imx219-cam0), which is an IMX219 raw
+sensor and needs an ISP.
+
+Using this method, you can configure the correct device for camera capture in the
+input section of the config file.
+
+.. code-block:: yaml
+
+   input0:
+       source: /dev/video-usb-cam0          #USB Camera
+       format: jpeg                         #if connected USB camera supports jpeg
+       width: 1280
+       height: 720
+       framerate: 30
+
+   input1:
+       source: /dev/video-imx219-cam0       #IMX219 raw sensor that needs ISP
+       format: rggb                         #ISP will be added in the pipeline
+       width: 1920
+       height: 1080
+       framerate: 30
+       subdev-id: /dev/v4l-imx219-subdev0   #needed by ISP to control sensor params via ioctls
+
+Make sure to configure the correct ``format`` for the camera input: ``jpeg`` for a USB
+camera that supports MJPEG (e.g. the Logitech C270 USB camera), ``auto`` for a CSI
+camera to allow GStreamer to negotiate the format, and ``rggb`` for a sensor
+that needs an ISP.
+
+Video sources
+-------------
+
+H.264 and H.265 encoded videos can be provided as input sources to the demos.
+Sample video files are provided under ``/opt/edgeai-test-data/videos/``
+
+.. code-block:: yaml
+
+   input1:
+       source: /opt/edgeai-test-data/videos/video0_1280_768.h264
+       format: h264
+       width: 1280
+       height: 768
+       framerate: 30
+
+   input2:
+       source: /opt/edgeai-test-data/videos/video0_1920_1088.h264
+       format: h264
+       width: 1920
+       height: 1088
+       framerate: 30
+
+Make sure to configure the correct ``format`` for the video input as shown above.
+By default the format is set to ``auto``, which will then use the GStreamer
+bin ``decodebin`` instead.
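+
+To sanity-check that a video file decodes correctly on the target outside of the
+demo applications, a minimal GStreamer pipeline can be launched by hand (a
+sketch using the hardware decoder element named elsewhere in this guide):
+
+.. code-block:: bash
+
+   gst-launch-1.0 filesrc location=/opt/edgeai-test-data/videos/video0_1280_768.h264 ! \
+       h264parse ! v4l2h264dec ! kmssink driver-name=tidss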
The names of the +files are numbered sequentially and incrementally and the demo plays the files +at the fps specified by the user. + +.. code-block:: yaml + + input2: + source: /opt/edgeai-test-data/images/%04d.jpg + width: 1280 + height: 720 + index: 0 + framerate: 1 + +RTSP sources +------------ + +H.264 encoded video streams either coming from a RTSP compliant IP camera or +via RTSP server running on a remote PC can be provided as inputs to the demo. + +.. code-block:: yaml + + input0: + source: rtsp://172.24.145.220:8554/test # rtsp stream url, replace this with correct url + width: 1280 + height: 720 + framerate: 30 + +Models +====== + +The model section defines a list of models that are used in the demo. Path to +the model directory is a required argument for each model and rest are optional +properties specific to given use cases like shown below. + +.. code-block:: yaml + + models: + model0: + model_path: /opt/model_zoo/TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320 #Model Directory + viz_threshold: 0.6 #Visualization threshold for adding bounding boxes (optional) + model1: + model_path: /opt/model_zoo/ONR-CL-6360-regNetx-200mf + topN: 5 #Number of top N classes (optional) + model2: + model_path: /opt/model_zoo/ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512 + alpha: 0.4 #alpha for blending segmentation mask (optional) + + +Below are some of the use case specific properties: + +#. **viz_threshold**: Score threshold to draw the bounding boxes for detected + objects in object detection. This can be used to control the number of boxes + in the output, increase if there are too many and decrease if there are very + few +#. **topN**: Number of most probable classes to overlay on image classification + output +#. **alpha**: This determines the weight of the mask for blending the semantic + segmentation output with the input image ``alpha * mask + (1 - alpha) * image`` + +The content of the model directory and its structure is discussed in detail in +:ref:`pub_edgeai_import_custom_models` + + +Outputs +======= + +The output section defines a list of supported outputs. + +.. code-block:: yaml + + outputs: + output0: #Display Output + sink: kmssink + width: 1920 #Width and Height of the output + height: 1080 + overlay-perf-type: graph #Overlay performance stat (graph or text default:No overlay) + connector: 39 #Connector ID for kmssink (optional) + + output1: #Video Output + sink: /opt/edgeai-test-data/outputs/output_video.mkv #Output video file + width: 1920 + height: 1080 + + output2: #Image Output + sink: /opt/edgeai-test-data/outputs/output_image_%04d.jpg #Image file name, printf style formatting is used + width: 1920 + height: 1080 + + output3: + sink: remote #Publish output to udp port as jpeg encoded frames + width: 1920 + height: 1080 + port: 8081 #udp port (default:8081) + host: 127.0.0.1 #udp host (default:0.0.0.0) + encoding: jpeg #encoding type (jpeg or mp4) + + +All supported outputs are listed in template config file. +Below are the details of most commonly used outputs + +Display sink (kmssink) +---------------------- + +When you have only one display connected to the SK, kmssink will try to use +it for displaying the output buffers. In case you have connected multiple +display monitors (e.g. Display Port and HDMI), you can select a specific display +for kmssink by passing a specific connector ID number. +Following command finds out the connected displays available to use. + +.. note:: + + Run this command to check which display is connected. 
+Models
+======
+
+The models section defines a list of models that are used in the demo. The path
+to the model directory is a required argument for each model; the rest are
+optional properties specific to given use cases, as shown below.
+
+.. code-block:: yaml
+
+   models:
+       model0:
+           model_path: /opt/model_zoo/TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320   #Model Directory
+           viz_threshold: 0.6                  #Visualization threshold for adding bounding boxes (optional)
+       model1:
+           model_path: /opt/model_zoo/ONR-CL-6360-regNetx-200mf
+           topN: 5                             #Number of top N classes (optional)
+       model2:
+           model_path: /opt/model_zoo/ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512
+           alpha: 0.4                          #alpha for blending segmentation mask (optional)
+
+Below are some of the use case specific properties:
+
+#. **viz_threshold**: Score threshold to draw the bounding boxes for detected
+   objects in object detection. This can be used to control the number of boxes
+   in the output; increase it if there are too many and decrease it if there
+   are very few.
+#. **topN**: Number of the most probable classes to overlay on the image
+   classification output.
+#. **alpha**: This determines the weight of the mask for blending the semantic
+   segmentation output with the input image: ``alpha * mask + (1 - alpha) * image``.
+
+The content of the model directory and its structure is discussed in detail in
+:ref:`pub_edgeai_import_custom_models`.
+
+
+Outputs
+=======
+
+The output section defines a list of supported outputs.
+
+.. code-block:: yaml
+
+   outputs:
+       output0:                                #Display Output
+           sink: kmssink
+           width: 1920                         #Width and Height of the output
+           height: 1080
+           overlay-perf-type: graph            #Overlay performance stat (graph or text, default: no overlay)
+           connector: 39                       #Connector ID for kmssink (optional)
+
+       output1:                                #Video Output
+           sink: /opt/edgeai-test-data/outputs/output_video.mkv        #Output video file
+           width: 1920
+           height: 1080
+
+       output2:                                #Image Output
+           sink: /opt/edgeai-test-data/outputs/output_image_%04d.jpg   #Image file name, printf style formatting is used
+           width: 1920
+           height: 1080
+
+       output3:
+           sink: remote                        #Publish output to udp port as jpeg encoded frames
+           width: 1920
+           height: 1080
+           port: 8081                          #udp port (default:8081)
+           host: 127.0.0.1                     #udp host (default:0.0.0.0)
+           encoding: jpeg                      #encoding type (jpeg or h264)
+
+
+All supported outputs are listed in the template config file.
+Below are the details of the most commonly used outputs.
+
+Display sink (kmssink)
+----------------------
+
+When you have only one display connected to the SK, kmssink will try to use it
+for displaying the output buffers. In case you have connected multiple display
+monitors (e.g. Display Port and HDMI), you can select a specific display for
+kmssink by passing a specific connector ID number.
+The following command lists the connected displays available for use.
+
+.. note::
+
+   Run this command to check which display is connected. The first number in
+   each line is the connector-id to be used in the next step.
+
+.. code-block:: bash
+
+   /opt/edgeai-gst-apps# modetest -M tidss -c | grep connected
+   39      38      connected       DP-1            530x300         12      38
+   48      0       disconnected    HDMI-A-1        0x0             0       47
+
+Configure the required connector ID in the output section of the config file.
+
+Video sinks
+-----------
+The post-processed outputs can be encoded in H.264 format and stored on disk.
+Please specify the location of the video file in the configuration file.
+
+.. code-block:: yaml
+
+   output1:
+       sink: /opt/edgeai-test-data/outputs/output_video.mkv   #(.mkv or .mp4 or .mov)
+       width: 1920
+       height: 1080
+
+Image sinks
+-----------
+The post-processed outputs can be stored as JPEG compressed images.
+Please specify the location of the image files in the configuration file.
+The images will be named sequentially and incrementally as shown.
+
+.. code-block:: yaml
+
+   output2:
+       sink: /opt/edgeai-test-data/outputs/output_image_%04d.jpg
+       width: 1920
+       height: 1080
+
+Remote sinks
+------------
+Post-processed frames can be encoded as jpeg or h264 frames and sent as UDP
+packets to a port. Please specify the sink as remote in the configuration file.
+The UDP port and host to send packets to can also be defined; if not, the
+default port is 8081 and the default host is 127.0.0.1.
+
+.. code-block:: yaml
+
+   output3:
+       sink: remote
+       width: 1920
+       height: 1080
+       port: 8081
+       host: 127.0.0.1
+       encoding: jpeg                          #(jpeg or h264)
+
+A NodeJS server is provided under ``/opt/edgeai-gst-apps/scripts/remote_streaming``;
+it runs on the target, listens on the UDP port (8081) on localhost (127.0.0.1),
+and can be used to view the frames remotely.
+
+.. code-block:: bash
+
+   /opt/edgeai-gst-apps# node scripts/remote_streaming/server.js
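+
+On the receiving machine, the jpeg-encoded UDP stream can also be viewed with a
+plain GStreamer pipeline instead of the NodeJS server. This is a sketch assuming
+the default port and jpeg encoding; ``multipartdemux`` unpacks the multipart
+stream produced by the sender.
+
+.. code-block:: bash
+
+   # On the remote PC: receive and display the jpeg frames sent to udp port 8081
+   gst-launch-1.0 udpsrc port=8081 ! multipartdemux ! jpegdec ! \
+       videoconvert ! autovideosink sync=false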
+Flows
+=====
+
+The flows section defines how inputs, models and outputs are connected.
+Multiple flows can be defined to achieve multi-input, multi-inference behavior
+as shown below.
+
+.. note::
+
+   The format for specifying flows changed in the 08.05.00 release to enable
+   multiple outputs in the same sub-flow. Older config files may not be
+   compatible from this release onwards and should be changed to the format below.
+
+.. code-block:: yaml
+
+   flows:
+       # flowname : [input,model,output,[mosaic_pos_x,mosaic_pos_y,width,height]]
+       flow0: [input0,model0,output0,[160,90,800,450]]
+       flow1: [input0,model1,output0,[960,90,800,450]]
+       flow2: [input1,model2,output0,[160,540,800,450]]
+       flow3: [input1,model3,output0,[960,540,800,450]]
+
+Each flow defined here has exactly **1 input** and **1 model**. If multiple
+flows have the same input, they are combined internally in the application for
+optimization. Along with inputs, models and outputs, it is required to define
+**n mosaics**, which are the positions of the inference outputs in the final
+output plane. This is needed because multiple inference outputs can be rendered
+to the same output (for example, a display).
+
+
+GStreamer plugins
+=================
+
+The edgeai-gst-apps essentially construct GStreamer pipelines for dataflow.
+Each pipeline is constructed optimally and dynamically based on a pool of
+platform-specific plugins. The defined pool of plugins for the different
+platforms can be found in the ``edgeai-gst-apps/configs/gst_plugin_maps.yaml``
+file.
+
+This file lists the plugin used for each task and its properties (if applicable).
+
+Default GStreamer plugins map for |__PART_FAMILY_NAME__|
+--------------------------------------------------------
+
+.. code-block:: yaml
+
+   :
+       dlcolorconvert:
+           element: tiovxdlcolorconvert
+           property:
+               out-pool-size: 4
+       colorconvert:
+           element: tiovxcolorconvert
+           property:
+               target: [0,1]
+               out-pool-size: 4
+       scaler:
+           element: tiovxmultiscaler
+           property:
+               target: [0,1]                   #[MSC targets to balance loads across]
+       dlpreproc:
+           element: tiovxdlpreproc
+           property:
+               out-pool-size: 4
+       mosaic:
+           element: tiovxmosaic
+       isp:
+           element: tiovxisp
+       ldc:
+           element: tiovxldc
+       h264dec:
+           element: v4l2h264dec
+           property:
+               capture-io-mode: 5              #[setting the mode for decoder]
+       h265dec:
+           element: v4l2h265dec
+       h264enc:
+           element: v4l2h264enc
+       h265enc: null
+       jpegenc:
+           element: jpegenc
+       inferer:
+           target: dsp                         #[dsp for c7x offload, arm for no offload]
+           core-id: [1]                        #[specify list of c7x cores to offload models]
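+
+The elements named in this map are regular GStreamer plugins, so their full set
+of properties and accepted values can be inspected on the target with
+``gst-inspect-1.0``, for example:
+
+.. code-block:: bash
+
+   # Inspect the properties of the multiscaler element referenced in the plugin map
+   gst-inspect-1.0 tiovxmultiscaler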
+
diff --git a/source/edgeai/dmpac_demos.rst b/source/edgeai/dmpac_demos.rst
new file mode 100644
index 000000000..bf639422a
--- /dev/null
+++ b/source/edgeai/dmpac_demos.rst
@@ -0,0 +1,60 @@
+.. _pub_dmpac_demos:
+
+=================================
+Depth and Motion Perception Demos
+=================================
+
+The |__PART_FAMILY_NAME__| EVM hosts the **Depth and Motion** hardware accelerator
+pack called **DMPAC**, which consists of a **Dense Optical Flow** accelerator that
+can provide a dense flow vector map for up to 2MP resolution at 60fps, and a
+**Stereo Disparity Engine** that can provide disparity values for up to 2MP
+resolution at 30fps. Below are example GStreamer pipelines demonstrating the
+usage of both.
+
+.. _pub_dmpac_dof_demo:
+
+Dense Optical Flow
+==================
+
+The below pipeline shows the basic functionality of the ``tiovxdof`` GStreamer
+element, which uses TI's Dense Optical Flow (DOF) accelerator on the SoC.
+
+.. code-block:: bash
+
+   gst-launch-1.0 \
+   v4l2src io-mode=2 device=/dev/video2 ! image/jpeg, width=1280, height=720 ! \
+   jpegdec ! tiovxdlcolorconvert ! video/x-raw, format=NV12 ! \
+   tiovxpyramid ! application/x-pyramid-tiovx, format=GRAY8 ! tee name=split ! \
+   queue ! tiovxdof name=dof ! tiovxdofviz ! \
+   kmssink sync=false driver-name=tidss \
+   split. ! tiovxdelay delay-size=1 ! dof.delayed_sink
+
+.. figure:: ../images/edgeai/dof_dataflow.png
+   :scale: 60
+   :align: center
+
+   GStreamer based data-flow pipeline for DOF demo
+
+.. _pub_dmpac_sde_demo:
+
+Stereo Disparity
+================
+
+The below pipeline shows the basic functionality of the ``tiovxsde`` GStreamer
+element, which uses TI's Stereo Disparity Engine (SDE) accelerator on the SoC.
+
+.. code-block:: bash
+
+   gst-launch-1.0 \
+   filesrc location=$EDGEAI_DATA_PATH/videos/left-1280x720.avi ! \
+   avidemux ! h264parse ! v4l2h264dec ! \
+   video/x-raw, format=NV12 ! queue ! sde.left_sink \
+   filesrc location=$EDGEAI_DATA_PATH/videos/right-1280x720.avi ! \
+   avidemux ! h264parse ! v4l2h264dec ! \
+   video/x-raw, format=NV12 ! queue ! sde.right_sink \
+   tiovxsde name=sde ! tiovxsdeviz ! kmssink sync=false driver-name=tidss
+
+.. figure:: ../images/edgeai/sde_dataflow.png
+   :scale: 60
+   :align: center
+
+   GStreamer based data-flow pipeline for SDE demo
diff --git a/source/edgeai/docker_environment.rst b/source/edgeai/docker_environment.rst
new file mode 100644
index 000000000..97310a028
--- /dev/null
+++ b/source/edgeai/docker_environment.rst
@@ -0,0 +1,226 @@
+.. _pub_edgeai_docker_env:
+
+==================
+Docker Environment
+==================
+
+Docker is a set of "platform as a service" products that use OS-level
+virtualization to deliver software in packages called containers.
+The Docker container provides a quick-start environment for developers to run
+the out-of-box demos and build applications.
+
+The Docker image is based on Ubuntu 22.04 LTS and contains open source
+components like OpenCV, GStreamer, Python and pip packages which are required
+to run the demos. The user can choose to install additional third-party
+applications and packages as required.
+
+.. _pub_edgeai_docker_build_ontarget:
+
+Building Docker image
+======================
+
+The ``docker/Dockerfile`` in the edgeai-gst-apps repo describes the recipe for
+creating the Docker container image. Feel free to review and update it to
+include additional packages before building the image.
+
+.. note::
+
+   Building the Docker image on target using the provided Dockerfile will take
+   about 15-20 minutes to complete with a good internet connection.
+   Building Docker containers on target can be slow and resource constrained.
+   The Dockerfile provided will build on target without any issues, but if
+   you add more packages or build components from source, running out of memory
+   can be a common problem. As an alternative, we highly recommend trying
+   QEMU builds for cross-compiling the images for the arm64 architecture on a PC
+   and then loading the compiled image on the target.
+
+Initiate the Docker image build as shown,
+
+.. code-block:: bash
+
+   /opt/edgeai-gst-apps/docker# ./docker_build.sh
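+
+As a sketch of the QEMU-based alternative mentioned in the note above, an arm64
+image can be cross-built on an x86 PC using Docker's ``buildx`` support and then
+transferred to the target. The image tag and tar file name below are
+placeholders.
+
+.. code-block:: bash
+
+   # On the PC: register QEMU binfmt handlers once, then cross-build for arm64
+   docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
+   docker buildx build --platform linux/arm64 -t edgeai-docker:arm64 --load docker/
+   docker save --output edgeai-docker.tar edgeai-docker:arm64
+
+   # On the target: load the prebuilt image
+   docker load --input edgeai-docker.tar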
+
+Running the Docker container
+============================
+
+Enter the Docker session as shown,
+
+.. code-block:: bash
+
+   /opt/edgeai-gst-apps/docker# ./docker_run.sh
+
+This will start an Ubuntu 22.04 LTS based Docker container, and the prompt will
+change as below,
+
+.. code-block:: bash
+
+   [docker] /opt/edgeai-gst-apps#
+
+
+The Docker container is created in privileged mode, so that it has root
+capabilities for all devices on the target system (network and others).
+The container also mounts the target file system paths /dev and /opt to
+access the camera, display and other hardware accelerators the SoC has to offer.
+
+.. note::
+
+   It is highly recommended to use the docker_run.sh script to launch the
+   Docker container, because this script takes care of saving any changes
+   made to the filesystem. This makes sure that any modifications to
+   the Docker filesystem, including new package installations, file updates
+   and command history, are saved automatically and are available the next
+   time you launch the container. The container will be committed only if you
+   exit from it explicitly. If you restart the board without exiting the
+   container, any changes made since the last saved state will be lost.
+
+.. note::
+
+   After building and running the Docker container, one needs to run
+   ``setup_script.sh`` before running any of the demo applications.
+   This is required to rebuild all components against the shared libraries
+   of Docker; the same should be done when switching back to Yocto.
+
+.. _pub_edgeai_docker_additional_commands:
+
+Additional Docker commands
+==========================
+
+.. note::
+
+   This section is provided only for additional reference and is not required
+   to run the out-of-box demos.
+
+**Commit Docker container**
+
+Generally, containers have a short life cycle. If the container has any local
+changes, it is good to save them on top of the existing Docker image so the
+local changes can be restored when the image is re-run.
+
+The following commands show how to save the changes made to the last container.
+Note that ``docker_run.sh`` already does this automatically when you exit
+the container.
+
+.. code-block:: bash
+
+   cont_id=`docker ps -q -l`
+   docker commit $cont_id edge_ai_kit
+   docker container rm $cont_id
+
+
+For more information refer to
+`Commit Docker image `_
+
+**Save Docker Image**
+
+A Docker image can be saved as a tar file by using the command below:
+
+.. code-block:: console
+
+   docker save --output <file_name>.tar <image_name>
+
+For more information refer to
+`Save Docker image `_
+
+**Load Docker image**
+
+Load a previously saved Docker image using the command below:
+
+.. code-block:: console
+
+   docker load --input <file_name>.tar
+
+For more information refer to
+`Load Docker image `_
+
+**Remove Docker image**
+
+A Docker image can be removed by using the commands below:
+
+.. code-block:: console
+
+   Remove selected image:
+   docker rmi <image_name>
+
+   Remove all images:
+   docker image prune -a
+
+For more information refer to
+`rmi reference `_ and
+`Image prune reference `_
+
+**Remove Docker container**
+
+A Docker container can be removed by using the commands below:
+
+.. code-block:: console
+
+   Remove selected container:
+   docker rm <container_id>
+
+   Remove all containers:
+   docker container prune
+
+For more information refer to
+`rm reference `_ and
+`Container Prune reference `_
+
+Relocating Docker Root Location
+===============================
+The default location for Docker files is **/var/lib/docker**. Any Docker images
+created will be stored here. This will be a problem anytime the SD card is
+updated with a new targetfs. If secondary storage (SSD or USB based storage)
+is available, then it is recommended to relocate the default Docker root
+location so as to preserve any existing Docker images. Once the relocation
+has been done, the Docker content will not be affected by any future targetfs
+updates or accidental corruptions of the SD card.
+
+The following steps outline the process for Docker root directory relocation,
+assuming that the current Docker root is not at the desired location. If the
+current location is already the desired one, skip this procedure.
+
+1. Run the ``docker info`` command and inspect the output. Locate the line
+   containing **Docker Root Dir**; it lists the current location.
+
+2. To preserve any existing images, export them to .tar files for importing
+   later into the new location.
+
+3. Inspect the content under /etc/docker to see if there is a file by the name
+   **daemon.json**. If the file is not present, then create **/etc/docker/daemon.json**
+   and add the following content. Update the key/value pair for the key "graph"
+   to reflect the desired root location. If the file already exists, then make
+   sure that the line with "graph" exists in the file and points to the desired
+   target location.
+
+   .. code-block:: json
+
+      {
+          "graph": "/run/media/nvme0n1/docker_root",
+          "storage-driver": "overlay",
+          "live-restore": true
+      }
+
+   In the configuration above, the key/value pair
+   ``"graph": "/run/media/nvme0n1/docker_root"`` defines the root location
+   ``/run/media/nvme0n1/docker_root``.
+
+4. Once the daemon.json file has been copied and updated, run the following
+   commands
+
+   .. code-block:: bash
+
+      $ systemctl restart docker
+      $ docker info
+
+   Make sure that the new Docker root appears as the **Docker Root Dir** value.
+
+5. If you exported the existing images in step (2), then import them and they
+   will appear under the new Docker root.
+
+6. Anytime the SD card is updated with a new targetfs, steps (1), (3), and
+   (4) need to be followed.
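+
+As a condensed sketch of the relocation procedure above (paths and image names
+are placeholders, and existing images are assumed to have been exported first):
+
+.. code-block:: bash
+
+   docker info | grep "Docker Root Dir"            # step 1: check the current root
+   docker save --output my_images.tar my_image     # step 2: preserve existing images
+   vi /etc/docker/daemon.json                      # step 3: point "graph" at the new root
+   systemctl restart docker                        # step 4: restart and verify
+   docker info | grep "Docker Root Dir"
+   docker load --input my_images.tar               # step 5: re-import the images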
+
+**Additional references**
+
+| https://docs.docker.com/engine/reference/commandline/images/
+| https://docs.docker.com/engine/reference/commandline/ps/
diff --git a/source/edgeai/edgeai_dataflows.rst b/source/edgeai/edgeai_dataflows.rst
new file mode 100644
index 000000000..a6a21daf2
--- /dev/null
+++ b/source/edgeai/edgeai_dataflows.rst
@@ -0,0 +1,475 @@
+.. _pub_edgeai_dataflows:
+
+=================
+Edge AI dataflows
+=================
+
+The reference edgeai application at a high level can be split into 3 parts:
+
+   - Input pipeline - Grabs a frame from a camera, video, image or RTSP source
+   - Compute pipeline - Performs pre-processing, inference and post-processing
+   - Output pipeline - Sends the output to a display or file, or streams it over the network
+
+Here are some sample dataflows for the reference application and the corresponding
+GStreamer launch strings that are generated. Users can interact with the
+application via the :ref:`pub_edgeai_configuration`.
+
+.. _pub_edgeai_optiflow_data_flow:
+
+OpTIFlow
+====================
+
+Image Classification
+--------------------
+
+| **Input: USB Camera**
+| **DL Task: Classification**
+| **Output: Display**
+|
+
+A frame is grabbed from an input source and split into two paths.
+The "analytics" path resizes the input maintaining the aspect ratio and crops
+the input to match the resolution required to run the deep learning network.
+The "visualization" path is provided to the post-processing plugin, which
+overlays the detected classes. The post-processed output is given to the HW
+mosaic plugin, which positions and resizes the output window on an empty
+background before sending it to the display.
+
+GStreamer pipeline:
+
+.. code-block:: bash
+
+   v4l2src device=/dev/video-usb-cam0 io-mode=2 ! image/jpeg, width=1280, height=720 ! jpegdec ! tiovxdlcolorconvert ! video/x-raw, format=NV12 ! \
+   tiovxmultiscaler name=split_01 src_0::roi-startx=80 src_0::roi-starty=45 src_0::roi-width=1120 src_0::roi-height=630 target=0 \
+   \
+   split_01. ! queue ! video/x-raw, width=280, height=224 ! tiovxmultiscaler target=1 ! video/x-raw, width=224, height=224 ! \
+   tiovxdlpreproc model=/opt/model_zoo/TFL-CL-0000-mobileNetV1-mlperf out-pool-size=4 ! application/x-tensor-tiovx ! \
+   tidlinferer target=1 model=/opt/model_zoo/TFL-CL-0000-mobileNetV1-mlperf ! post_0.tensor \
+   \
+   split_01. ! queue ! video/x-raw, width=1280, height=720 ! post_0.sink \
+   \
+   tidlpostproc name=post_0 model=/opt/model_zoo/TFL-CL-0000-mobileNetV1-mlperf alpha=0.4 viz-threshold=0.5 top-N=5 display-model=true ! queue ! mosaic_0. \
+   \
+   tiovxmosaic name=mosaic_0 target=1 src::pool-size=4 sink_0::startx="<320>" sink_0::starty="<150>" sink_0::widths="<1280>" sink_0::heights="<720>" ! \
+   video/x-raw, format=NV12, width=1920, height=1080 ! queue ! tiperfoverlay title="Image Classification" overlay-type=graph ! kmssink driver-name=tidss sync=true
+
+.. figure:: ../images/edgeai/optiflow_image_classification.png
+   :scale: 75
+   :align: center
+
+   OpTIFlow pipeline for image classification demo with USB camera and display
+
+Object Detection
+--------------------
+
+| **Input: IMX219 Camera**
+| **DL Task: Detection**
+| **Output: File**
+|
+
+A frame is grabbed from an input source and split into two paths.
+The "analytics" path resizes the input to match the resolution required to run
+the deep learning network. The "visualization" path is provided to the
+post-processing plugin, which overlays rectangles around detected objects.
+The post-processed output is given to the HW mosaic plugin, which positions and
+resizes the output window on an empty background before it is encoded and saved
+to a file.
+
+GStreamer pipeline:
+
+.. code-block:: bash
+
+   v4l2src device=/dev/video-imx219-cam0 io-mode=5 ! queue leaky=2 ! video/x-bayer, width=1920, height=1080, format=rggb ! \
+   tiovxisp sensor-name=SENSOR_SONY_IMX219_RPI dcc-isp-file=/opt/imaging/imx219/linear/dcc_viss.bin format-msb=7 sink_0::dcc-2a-file=/opt/imaging/imx219/linear/dcc_2a.bin sink_0::device=/dev/v4l-imx219-subdev0 ! video/x-raw, format=NV12 ! \
+   tiovxmultiscaler name=split_01 src_0::roi-startx=0 src_0::roi-starty=0 src_0::roi-width=1920 src_0::roi-height=1080 target=0 \
+   \
+   split_01. ! queue ! video/x-raw, width=480, height=416 ! tiovxmultiscaler target=1 ! video/x-raw, width=416, height=416 ! \
+   tiovxdlpreproc model=/opt/model_zoo/ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416 out-pool-size=4 ! application/x-tensor-tiovx ! \
+   tidlinferer target=1 model=/opt/model_zoo/ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416 ! post_0.tensor \
+   \
+   split_01. ! queue ! video/x-raw, width=1280, height=720 ! post_0.sink \
+   \
+   tidlpostproc name=post_0 model=/opt/model_zoo/ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416 alpha=0.4 viz-threshold=0.6 top-N=5 display-model=true ! queue ! mosaic_0. \
+   \
+   tiovxmosaic name=mosaic_0 target=1 src::pool-size=4 sink_0::startx="<320>" sink_0::starty="<150>" sink_0::widths="<1280>" sink_0::heights="<720>" ! \
+   video/x-raw,format=NV12, width=1920, height=1080 ! v4l2h264enc extra-controls="controls, frame_level_rate_control_enable=1, video_bitrate=10000000, video_gop_size=30" ! \
+   h264parse ! matroskamux ! filesink location=/opt/edgeai-test-data/output/output_video.mkv
+
+.. figure:: ../images/edgeai/optiflow_object_detection.png
+   :scale: 75
+   :align: center
+
+   OpTIFlow pipeline for object detection demo with IMX219 camera and save to file
+
+Semantic Segmentation
+---------------------
+
+| **Input: H264 Video**
+| **DL Task: Segmentation**
+| **Output: Remote streaming**
+|
+
+A frame is grabbed from an input source and split into two paths.
+The "analytics" path resizes the input to match the resolution required to run
+the deep learning network. The "visualization" path is provided to the
+post-processing plugin, which blends each segmented pixel with a color map.
+The post-processed output is given to the HW mosaic plugin, which positions and
+resizes the output window on an empty background before it is encoded and streamed.
+
+GStreamer pipeline:
+
+.. code-block:: bash
+
+   multifilesrc location=/opt/edgeai-test-data/videos/video0_1280_768.h264 loop=true stop-index=-1 caps=video/x-h264,width=1280,height=768,framerate=30/1 ! \
+   h264parse ! v4l2h264dec capture-io-mode=5 ! tiovxmemalloc pool-size=12 ! video/x-raw, format=NV12 ! \
+   tiovxmultiscaler name=split_01 src_0::roi-startx=0 src_0::roi-starty=0 src_0::roi-width=1280 src_0::roi-height=768 target=0 \
+   \
+   split_01. ! queue ! video/x-raw, width=512, height=512 ! tiovxdlpreproc model=/opt/model_zoo/TFL-SS-2580-deeplabv3_mobv2-ade20k32-mlperf-512x512 out-pool-size=4 ! application/x-tensor-tiovx ! \
+   tidlinferer target=1 model=/opt/model_zoo/TFL-SS-2580-deeplabv3_mobv2-ade20k32-mlperf-512x512 ! post_0.tensor \
+   \
+   split_01. ! queue ! video/x-raw, width=1280, height=720 ! post_0.sink \
+   \
+   tidlpostproc name=post_0 model=/opt/model_zoo/TFL-SS-2580-deeplabv3_mobv2-ade20k32-mlperf-512x512 alpha=0.4 viz-threshold=0.5 top-N=5 display-model=true ! queue ! mosaic_0. 
\ + \ + tiovxmosaic name=mosaic_0 target=1 src::pool-size=4 sink_0::startx="<320>" sink_0::starty="<150>" sink_0::widths="<1280>" sink_0::heights="<720>" ! \ + video/x-raw,format=NV12, width=1920, height=1080 ! queue ! tiperfoverlay title="Semantic Segmentation" overlay-type=graph ! \ + jpegenc ! multipartmux boundary=spionisto ! rndbuffersize max=65000 ! udpsink host=127.0.0.1 port=8081 sync=false + +.. figure:: ../images/edgeai/optiflow_semantic_segmentation.png + :scale: 75 + :align: center + + OpTIFlow pipeline for semantic segmentation demo with file input and remote streaming + +Single Input Multi Inference +---------------------------- + +| **Input: H264 Video** +| **DL Task: Detection, Detection, Classification, Segmentation** +| **Output: Display** +| + +GStreamer pipeline: + +.. code-block:: bash + + multifilesrc location=/opt/edgeai-test-data/videos/video0_1280_768.h264 loop=true stop-index=-1 caps=video/x-h264,width=1280,height=768,framerate=30/1 ! h264parse ! v4l2h264dec capture-io-mode=5 ! tiovxmemalloc pool-size=12 ! video/x-raw, format=NV12 ! \ + tee name=tee_split0 \ + tee_split0. ! queue ! tiovxmultiscaler name=split_01 src_0::roi-startx=0 src_0::roi-starty=0 src_0::roi-width=1280 src_0::roi-height=768 src_2::roi-startx=0 src_2::roi-starty=0 src_2::roi-width=1280 src_2::roi-height=768 target=0 \ + tee_split0. ! queue ! tiovxmultiscaler name=split_02 src_0::roi-startx=80 src_0::roi-starty=48 src_0::roi-width=1120 src_0::roi-height=672 src_2::roi-startx=0 src_2::roi-starty=0 src_2::roi-width=1280 src_2::roi-height=768 target=0 \ + \ + split_01. ! queue ! video/x-raw, width=320, height=320 ! tiovxdlpreproc model=/opt/model_zoo/TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320 out-pool-size=4 ! application/x-tensor-tiovx ! tidlinferer target=1 model=/opt/model_zoo/TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320 ! post_0.tensor \ + split_01. ! queue ! video/x-raw, width=640, height=360 ! post_0.sink \ + tidlpostproc name=post_0 model=/opt/model_zoo/TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320 alpha=0.400000 viz-threshold=0.600000 top-N=5 display-model=true ! queue ! mosaic_0. \ + \ + split_01. ! queue ! video/x-raw, width=416, height=416 ! tiovxdlpreproc model=/opt/model_zoo/ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416 out-pool-size=4 ! application/x-tensor-tiovx ! tidlinferer target=1 model=/opt/model_zoo/ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416 ! post_1.tensor \ + split_01. ! queue ! video/x-raw, width=640, height=360 ! post_1.sink \ + tidlpostproc name=post_1 model=/opt/model_zoo/ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416 alpha=0.400000 viz-threshold=0.600000 top-N=5 display-model=true ! queue ! mosaic_0. \ + \ + split_02. ! queue ! video/x-raw, width=280, height=224 ! tiovxmultiscaler target=1 ! video/x-raw, width=224, height=224 ! tiovxdlpreproc model=/opt/model_zoo/ONR-CL-6360-regNetx-200mf out-pool-size=4 ! application/x-tensor-tiovx ! tidlinferer target=1 model=/opt/model_zoo/ONR-CL-6360-regNetx-200mf ! post_2.tensor \ + split_02. ! queue ! video/x-raw, width=640, height=360 ! post_2.sink \ + tidlpostproc name=post_2 model=/opt/model_zoo/ONR-CL-6360-regNetx-200mf alpha=0.400000 viz-threshold=0.500000 top-N=5 display-model=true ! queue ! mosaic_0. \ + \ + split_02. ! queue ! video/x-raw, width=512, height=512 ! tiovxdlpreproc model=/opt/model_zoo/ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512 out-pool-size=4 ! application/x-tensor-tiovx ! tidlinferer target=1 model=/opt/model_zoo/ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512 ! post_3.tensor \ + split_02. ! queue ! 
video/x-raw, width=640, height=360 ! post_3.sink \ + tidlpostproc name=post_3 model=/opt/model_zoo/ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512 alpha=0.400000 viz-threshold=0.500000 top-N=5 display-model=true ! queue ! mosaic_0. \ + \ + \ + tiovxmosaic name=mosaic_0 target=1 src::pool-size=4 \ + sink_0::startx="<320>" sink_0::starty="<150>" sink_0::widths="<640>" sink_0::heights="<360>" \ + sink_1::startx="<960>" sink_1::starty="<150>" sink_1::widths="<640>" sink_1::heights="<360>" \ + sink_2::startx="<320>" sink_2::starty="<530>" sink_2::widths="<640>" sink_2::heights="<360>" \ + sink_3::startx="<960>" sink_3::starty="<530>" sink_3::widths="<640>" sink_3::heights="<360>" \ + ! video/x-raw,format=NV12, width=1920, height=1080 ! queue ! tiperfoverlay title="Single Input, Multi Inference" overlay-type=graph ! kmssink driver-name=tidss sync=false + +.. figure:: ../images/edgeai/optiflow_single_input_multi_infer.png + :scale: 120 + :align: center + + OpTIFlow pipeline for single input multi inference + +Multi Input Multi Inference +---------------------------- + +| **Input: USB Camera, H264 Video** +| **DL Task: Detection, Detection, Classification, Segmentation** +| **Output: Display** +| + +GStreamer pipeline: + +.. code-block:: bash + + v4l2src device=/dev/video-usb-cam0 io-mode=2 ! image/jpeg, width=1280, height=720 ! jpegdec ! tiovxdlcolorconvert ! video/x-raw, format=NV12 ! \ + tiovxmultiscaler name=split_01 src_0::roi-startx=0 src_0::roi-starty=0 src_0::roi-width=1280 src_0::roi-height=720 src_2::roi-startx=0 src_2::roi-starty=0 src_2::roi-width=1280 src_2::roi-height=720 target=0 \ + \ + split_01. ! queue ! video/x-raw, width=320, height=320 ! tiovxdlpreproc model=/opt/model_zoo/TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320 out-pool-size=4 ! application/x-tensor-tiovx ! tidlinferer target=1 model=/opt/model_zoo/TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320 ! post_0.tensor \ + split_01. ! queue ! video/x-raw, width=640, height=360 ! post_0.sink \ + tidlpostproc name=post_0 model=/opt/model_zoo/TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320 alpha=0.400000 viz-threshold=0.600000 top-N=5 display-model=true ! queue ! mosaic_0. \ + \ + split_01. ! queue ! video/x-raw, width=416, height=416 ! tiovxdlpreproc model=/opt/model_zoo/ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416 out-pool-size=4 ! application/x-tensor-tiovx ! tidlinferer target=1 model=/opt/model_zoo/ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416 ! post_1.tensor \ + split_01. ! queue ! video/x-raw, width=640, height=360 ! post_1.sink \ + tidlpostproc name=post_1 model=/opt/model_zoo/ONR-OD-8200-yolox-nano-lite-mmdet-coco-416x416 alpha=0.400000 viz-threshold=0.600000 top-N=5 display-model=true ! queue ! mosaic_0. \ + \ + multifilesrc location=/opt/edgeai-test-data/videos/video0_1280_768.h264 loop=true stop-index=-1 caps=video/x-h264,width=1280,height=768,framerate=30/1 ! h264parse ! v4l2h264dec capture-io-mode=5 ! tiovxmemalloc pool-size=12 ! video/x-raw, format=NV12 ! \ + tiovxmultiscaler name=split_11 src_0::roi-startx=80 src_0::roi-starty=48 src_0::roi-width=1120 src_0::roi-height=672 src_2::roi-startx=0 src_2::roi-starty=0 src_2::roi-width=1280 src_2::roi-height=768 target=1 \ + \ + split_11. ! queue ! video/x-raw, width=280, height=224 ! tiovxmultiscaler target=0 ! video/x-raw, width=224, height=224 ! tiovxdlpreproc model=/opt/model_zoo/ONR-CL-6360-regNetx-200mf out-pool-size=4 ! application/x-tensor-tiovx ! tidlinferer target=1 model=/opt/model_zoo/ONR-CL-6360-regNetx-200mf ! post_2.tensor \ + split_11. ! queue ! 
video/x-raw, width=640, height=360 ! post_2.sink \
+   tidlpostproc name=post_2 model=/opt/model_zoo/ONR-CL-6360-regNetx-200mf alpha=0.400000 viz-threshold=0.500000 top-N=5 display-model=true ! queue ! mosaic_0. \
+   \
+   split_11. ! queue ! video/x-raw, width=512, height=512 ! tiovxdlpreproc model=/opt/model_zoo/ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512 out-pool-size=4 ! application/x-tensor-tiovx ! tidlinferer target=1 model=/opt/model_zoo/ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512 ! post_3.tensor \
+   split_11. ! queue ! video/x-raw, width=640, height=360 ! post_3.sink \
+   tidlpostproc name=post_3 model=/opt/model_zoo/ONR-SS-8610-deeplabv3lite-mobv2-ade20k32-512x512 alpha=0.400000 viz-threshold=0.500000 top-N=5 display-model=true ! queue ! mosaic_0. \
+   \
+   \
+   tiovxmosaic name=mosaic_0 target=1 src::pool-size=4 \
+   sink_0::startx="<320>" sink_0::starty="<150>" sink_0::widths="<640>" sink_0::heights="<360>" \
+   sink_1::startx="<960>" sink_1::starty="<150>" sink_1::widths="<640>" sink_1::heights="<360>" \
+   sink_2::startx="<320>" sink_2::starty="<530>" sink_2::widths="<640>" sink_2::heights="<360>" \
+   sink_3::startx="<960>" sink_3::starty="<530>" sink_3::widths="<640>" sink_3::heights="<360>" \
+   ! video/x-raw,format=NV12, width=1920, height=1080 ! queue ! tiperfoverlay title="Multi Input, Multi Inference" overlay-type=graph ! kmssink driver-name=tidss sync=false
+
+
+.. figure:: ../images/edgeai/optiflow_multi_input_multi_infer.png
+   :scale: 120
+   :align: center
+
+   OpTIFlow pipeline for multi input multi inference
+
+Python/C++ apps
+======================
+
+Image Classification
+--------------------
+
+| **Input: USB Camera**
+| **DL Task: Classification**
+| **Output: Display**
+|
+
+A frame is grabbed from an input source and split into two paths.
+The "analytics" path resizes the input maintaining the aspect ratio and crops
+the input to match the resolution required to run the deep learning network.
+A buffer is pulled from the "analytics" path using appsink and provided to the
+OSRT libraries for inference. Another buffer is pulled from the "visualization"
+path and is provided to the post-processing module along with the inference
+result. The post-processing module uses OpenCV to overlay the detected classes.
+The post-processed output is given to the HW mosaic plugin, which positions and
+resizes the output window on an empty background before sending it to the display.
+
+GStreamer input pipeline:
+
+.. code-block:: bash
+
+   v4l2src device=/dev/video-usb-cam0 ! capsfilter caps="image/jpeg, width=(int)1280, height=(int)720;" ! jpegdec ! tiovxdlcolorconvert ! capsfilter caps="video/x-raw, format=(string)NV12;" ! tiovxmultiscaler name=split_01
+   split_01. ! queue ! capsfilter caps="video/x-raw, width=(int)1280, height=(int)720;" ! tiovxdlcolorconvert out-pool-size=4 ! capsfilter caps="video/x-raw, format=(string)RGB;" ! appsink max-buffers=2 drop=True name=sen_0
+   split_01. ! queue ! capsfilter caps="video/x-raw, width=(int)454, height=(int)256;" ! tiovxdlcolorconvert out-pool-size=4 ! capsfilter caps="video/x-raw, format=(string)RGB;" ! videobox qos=True left=115 right=115 top=16 bottom=16 ! tiovxdlpreproc out-pool-size=4 channel-order=1 data-type=3 ! capsfilter caps="application/x-tensor-tiovx;" ! appsink max-buffers=2 drop=True name=pre_0
+
+
+GStreamer output pipeline:
+
+.. code-block:: bash
+
+   appsrc do-timestamp=True format=3 block=True name=post_0 ! tiovxdlcolorconvert ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)1280, height=(int)720;" ! queue ! mosaic_0.sink_0
+
+   tiovxmosaic target=1 background=/tmp/background_0 name=mosaic_0 src::pool-size=4
+   sink_0::startx="<320>" sink_0::starty="<150>" sink_0::widths="<1280>" sink_0::heights="<720>"
+   ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)1920, height=(int)1080;" ! queue ! tiperfoverlay title=Image Classification ! kmssink sync=false force-modesetting=True
+
+.. figure:: ../images/edgeai/edgeai_app_image_classification.png
+   :scale: 75
+   :align: center
+
+   Python/C++ application data-flow for image classification demo with USB camera and display
+
+Object Detection
+--------------------
+
+| **Input: IMX219 Camera**
+| **DL Task: Detection**
+| **Output: File**
+|
+
+A frame is grabbed from an input source and split into two paths.
+The "analytics" path resizes the input maintaining the aspect ratio and crops
+the input to match the resolution required to run the deep learning network.
+A buffer is pulled from the "analytics" path using appsink and provided to the
+OSRT libraries for inference. Another buffer is pulled from the "visualization"
+path and is provided to the post-processing module along with the inference
+result. The post-processing module uses OpenCV to overlay rectangles around
+detected objects. The post-processed output is given to the HW mosaic plugin,
+which positions and resizes the output window on an empty background before it
+is encoded and saved to a file.
+
+GStreamer input pipeline:
+
+.. code-block:: bash
+
+   v4l2src device=/dev/video-imx219-cam0 io-mode=5 pixel-aspect-ratio=None ! queue leaky=2 ! capsfilter caps="video/x-bayer, width=(int)1920, height=(int)1080, format=(string)rggb;" ! tiovxisp dcc-isp-file=/opt/imaging/imx219/linear/dcc_viss.bin sensor-name=SENSOR_SONY_IMX219_RPI ! capsfilter caps="video/x-raw, format=(string)NV12;" ! tiovxmultiscaler name=split_01
+   split_01. ! queue ! capsfilter caps="video/x-raw, width=(int)1280, height=(int)720;" ! tiovxdlcolorconvert out-pool-size=4 ! capsfilter caps="video/x-raw, format=(string)RGB;" ! appsink max-buffers=2 drop=True name=sen_0
+   split_01. ! queue ! capsfilter caps="video/x-raw, width=(int)1168, height=(int)748;" ! tiovxmultiscaler target=1 ! capsfilter caps="video/x-raw, width=(int)416, height=(int)416;" ! tiovxdlpreproc out-pool-size=4 data-type=3 tensor-format=1 ! capsfilter caps="application/x-tensor-tiovx;" ! appsink max-buffers=2 drop=True name=pre_0
+
+GStreamer output pipeline:
+
+.. code-block:: bash
+
+   appsrc do-timestamp=True format=3 block=True name=post_0 ! tiovxdlcolorconvert ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)1280, height=(int)720;" ! queue ! mosaic_0.sink_0
+
+   tiovxmosaic target=1 background=/tmp/background_0 name=mosaic_0 src::pool-size=2
+   sink_0::startx="<320>" sink_0::starty="<150>" sink_0::widths="<1280>" sink_0::heights="<720>"
+   ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)1920, height=(int)1080;" !
+   v4l2h264enc extra-controls="controls, frame_level_rate_control_enable=(int)1, video_bitrate=(int)10000000, video_gop_size=(int)30;" !
+   h264parse ! matroskamux writing-app=GStreamer Matroska muxer ! filesink sync=False location=/opt/edgeai-test-data/output/output_video.mkv
.. figure:: ../images/edgeai/edgeai_app_object_detection.png
+   :scale: 75
+   :align: center
+
+   Python/C++ application data-flow for object detection demo with IMX219 camera and save to file
+
+Semantic Segmentation
+---------------------
+
+| **Input: H264 Video**
+| **DL Task: Segmentation**
+| **Output: Remote streaming**
+|
+
+A frame is grabbed from an input source and split into two paths.
+The "analytics" path resizes the input maintaining the aspect ratio and crops
+the input to match the resolution required to run the deep learning network.
+A buffer is pulled from the "analytics" path using appsink and provided to the
+OSRT libraries for inference. Another buffer is pulled from the "visualization"
+path and is provided to the post-processing module along with the inference
+result. The post-processing module uses OpenCV to blend each segmented pixel
+with a color map. The post-processed output is given to the HW mosaic plugin,
+which positions and resizes the output window on an empty background before it
+is encoded and streamed.
+
+GStreamer input pipeline:
+
+.. code-block:: bash
+
+   multifilesrc location=/opt/edgeai-test-data/videos/video0_1280_768.h264 loop=True ! h264parse ! v4l2h264dec capture-io-mode=5 ! tiovxmemalloc pool-size=12 ! capsfilter caps="video/x-raw, format=(string)NV12;" ! tiovxmultiscaler name=split_01
+   split_01. ! queue ! capsfilter caps="video/x-raw, width=(int)1280, height=(int)720;" ! tiovxdlcolorconvert out-pool-size=4 ! capsfilter caps="video/x-raw, format=(string)RGB;" ! appsink max-buffers=2 drop=True name=sen_0
+   split_01. ! queue ! capsfilter caps="video/x-raw, width=(int)512, height=(int)512;" ! tiovxdlpreproc out-pool-size=4 channel-order=1 data-type=3 ! capsfilter caps="application/x-tensor-tiovx;" ! appsink max-buffers=2 drop=True name=pre_0
+
+GStreamer output pipeline:
+
+.. code-block:: bash
+
+   appsrc do-timestamp=True format=3 block=True name=post_0 ! tiovxdlcolorconvert ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)1280, height=(int)720;" ! queue ! mosaic_0.sink_0
+
+   tiovxmosaic target=1 background=/tmp/background_0 name=mosaic_0 src::pool-size=4
+   sink_0::startx="<320>" sink_0::starty="<150>" sink_0::widths="<1280>" sink_0::heights="<720>"
+   ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)1920, height=(int)1080;" ! queue ! tiperfoverlay title=Semantic Segmentation !
+   v4l2jpegenc ! multipartmux boundary=spionisto ! rndbuffersize max=65000 ! udpsink sync=False clients=127.0.0.1:8081 host=127.0.0.1 port=8081
+
+.. figure:: ../images/edgeai/edgeai_app_semantic_segmentation.png
+   :scale: 75
+   :align: center
+
+   Python/C++ application data-flow for semantic segmentation demo with file input and remote streaming
+
+Single Input Multi Inference
+----------------------------
+
+| **Input: H264 Video**
+| **DL Task: Detection, Detection, Classification, Segmentation**
+| **Output: Display**
+|
+
+GStreamer input pipeline:
+
+.. code-block:: bash
+
+   multifilesrc location=/opt/edgeai-test-data/videos/video0_1280_768.h264 loop=True ! h264parse ! v4l2h264dec capture-io-mode=5 ! tiovxmemalloc pool-size=12 ! capsfilter caps="video/x-raw, format=(string)NV12;" ! tee name=tee_split0
+   tee_split0. ! queue ! tiovxmultiscaler name=split_01
+   split_01. ! queue ! capsfilter caps="video/x-raw, width=(int)640, height=(int)360;" ! tiovxdlcolorconvert out-pool-size=4 ! capsfilter caps="video/x-raw, format=(string)RGB;" ! appsink max-buffers=2 drop=True name=sen_0
+   split_01. ! queue ! capsfilter caps="video/x-raw, width=(int)320, height=(int)320;" ! 
tiovxdlpreproc out-pool-size=4 channel-order=1 data-type=3 ! capsfilter caps="application/x-tensor-tiovx;" ! appsink max-buffers=2 drop=True name=pre_0 + tee_split0. ! queue ! tiovxmultiscaler name=split_02 + split_02. ! queue ! capsfilter caps="video/x-raw, width=(int)640, height=(int)360;" ! tiovxdlcolorconvert out-pool-size=4 ! capsfilter caps="video/x-raw, format=(string)RGB;" ! appsink max-buffers=2 drop=True name=sen_1 + split_02. ! queue ! capsfilter caps="video/x-raw, width=(int)416, height=(int)416;" ! tiovxdlpreproc out-pool-size=4 data-type=3 tensor-format=1 ! capsfilter caps="application/x-tensor-tiovx;" ! appsink max-buffers=2 drop=True name=pre_1 + tee_split0. ! queue ! tiovxmultiscaler name=split_03 + split_03. ! queue ! capsfilter caps="video/x-raw, width=(int)640, height=(int)360;" ! tiovxdlcolorconvert out-pool-size=4 ! capsfilter caps="video/x-raw, format=(string)RGB;" ! appsink max-buffers=2 drop=True name=sen_2 + split_03. ! queue ! capsfilter caps="video/x-raw, width=(int)426, height=(int)256;" ! tiovxdlcolorconvert out-pool-size=4 ! capsfilter caps="video/x-raw, format=(string)RGB;" ! videobox qos=True left=101 right=101 top=16 bottom=16 ! tiovxdlpreproc out-pool-size=4 data-type=3 tensor-format=1 ! capsfilter caps="application/x-tensor-tiovx;" ! appsink max-buffers=2 drop=True name=pre_2 + tee_split0. ! queue ! tiovxmultiscaler name=split_04 + split_04. ! queue ! capsfilter caps="video/x-raw, width=(int)640, height=(int)360;" ! tiovxdlcolorconvert out-pool-size=4 ! capsfilter caps="video/x-raw, format=(string)RGB;" ! appsink max-buffers=2 drop=True name=sen_3 + split_04. ! queue ! capsfilter caps="video/x-raw, width=(int)512, height=(int)512;" ! tiovxdlpreproc out-pool-size=4 data-type=3 ! capsfilter caps="application/x-tensor-tiovx;" ! appsink max-buffers=2 drop=True name=pre_3 + +GStreamer output pipeline: + +.. code-block:: bash + + appsrc do-timestamp=True format=3 block=True name=post_0 ! tiovxdlcolorconvert ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)640, height=(int)360;" ! queue ! mosaic_0.sink_0 + + appsrc do-timestamp=True format=3 block=True name=post_1 ! tiovxdlcolorconvert ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)640, height=(int)360;" ! queue ! mosaic_0.sink_1 + + appsrc do-timestamp=True format=3 block=True name=post_2 ! tiovxdlcolorconvert ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)640, height=(int)360;" ! queue ! mosaic_0.sink_2 + + appsrc do-timestamp=True format=3 block=True name=post_3 ! tiovxdlcolorconvert ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)640, height=(int)360;" ! queue ! mosaic_0.sink_3 + + tiovxmosaic target=1 background=/tmp/background_0 name=mosaic_0 src::pool-size=4 + sink_0::startx="<320>" sink_0::starty="<150>" sink_0::widths="<640>" sink_0::heights="<360>" + sink_1::startx="<960>" sink_1::starty="<150>" sink_1::widths="<640>" sink_1::heights="<360>" + sink_2::startx="<320>" sink_2::starty="<530>" sink_2::widths="<640>" sink_2::heights="<360>" + sink_3::startx="<960>" sink_3::starty="<530>" sink_3::widths="<640>" sink_3::heights="<360>" + ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)1920, height=(int)1080;" ! queue ! tiperfoverlay title=Single Input, Multi Inference ! kmssink sync=false driver-name=tidss force-modesetting=True + +.. 
figure:: ../images/edgeai/edgeai_app_single_input_multi_infer.png + :scale: 120 + :align: center + + Python/C++ application data-flow for single input multi inference + +Multi Input Multi Inference +---------------------------- + +| **Input: USB Camera, H264 Video** +| **DL Task: Detection, Detection, Classification, Segmentation** +| **Output: Display** +| + +GStreamer input pipeline: + +.. code-block:: bash + + v4l2src device=/dev/video-usb-cam0 ! capsfilter caps="image/jpeg, width=(int)1280, height=(int)720;" ! jpegdec ! tiovxdlcolorconvert ! capsfilter caps="video/x-raw, format=(string)NV12;" ! tee name=tee_split0 + tee_split0. ! queue ! tiovxmultiscaler name=split_01 + split_01. ! queue ! capsfilter caps="video/x-raw, width=(int)640, height=(int)360;" ! tiovxdlcolorconvert out-pool-size=4 ! capsfilter caps="video/x-raw, format=(string)RGB;" ! appsink max-buffers=2 drop=True name=sen_0 + split_01. ! queue ! capsfilter caps="video/x-raw, width=(int)320, height=(int)320;" ! tiovxdlpreproc out-pool-size=4 channel-order=1 data-type=3 ! capsfilter caps="application/x-tensor-tiovx;" ! appsink max-buffers=2 drop=True name=pre_0 + tee_split0. ! queue ! tiovxmultiscaler name=split_02 + split_02. ! queue ! capsfilter caps="video/x-raw, width=(int)640, height=(int)360;" ! tiovxdlcolorconvert out-pool-size=4 ! capsfilter caps="video/x-raw, format=(string)RGB;" ! appsink max-buffers=2 drop=True name=sen_1 + split_02. ! queue ! capsfilter caps="video/x-raw, width=(int)416, height=(int)416;" ! tiovxdlpreproc out-pool-size=4 data-type=3 tensor-format=1 ! capsfilter caps="application/x-tensor-tiovx;" ! appsink max-buffers=2 drop=True name=pre_1 + + multifilesrc location=/opt/edgeai-test-data/videos/video0_1280_768.h264 index=1 loop=True ! h264parse ! v4l2h264dec capture-io-mode=5 ! tiovxmemalloc pool-size=12 ! capsfilter caps="video/x-raw, format=(string)NV12;" ! tee name=tee_split1 + tee_split1. ! queue ! tiovxmultiscaler name=split_11 + split_11. ! queue ! capsfilter caps="video/x-raw, width=(int)640, height=(int)360;" ! tiovxdlcolorconvert out-pool-size=4 ! capsfilter caps="video/x-raw, format=(string)RGB;" ! appsink max-buffers=2 drop=True name=sen_2 + split_11. ! queue ! capsfilter caps="video/x-raw, width=(int)426, height=(int)256;" ! tiovxdlcolorconvert out-pool-size=4 ! capsfilter caps="video/x-raw, format=(string)RGB;" ! videobox qos=True left=101 right=101 top=16 bottom=16 ! tiovxdlpreproc out-pool-size=4 data-type=3 tensor-format=1 ! capsfilter caps="application/x-tensor-tiovx;" ! appsink max-buffers=2 drop=True name=pre_2 + tee_split1. ! queue ! tiovxmultiscaler name=split_12 + split_12. ! queue ! capsfilter caps="video/x-raw, width=(int)640, height=(int)360;" ! tiovxdlcolorconvert out-pool-size=4 ! capsfilter caps="video/x-raw, format=(string)RGB;" ! appsink max-buffers=2 drop=True name=sen_3 + split_12. ! queue ! capsfilter caps="video/x-raw, width=(int)512, height=(int)512;" ! tiovxdlpreproc out-pool-size=4 data-type=3 ! capsfilter caps="application/x-tensor-tiovx;" ! appsink max-buffers=2 drop=True name=pre_3 + + +GStreamer output pipeline: + +.. code-block:: bash + + appsrc do-timestamp=True format=3 block=True name=post_0 ! tiovxdlcolorconvert ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)640, height=(int)360;" ! queue ! mosaic_0.sink_0 + + appsrc do-timestamp=True format=3 block=True name=post_1 ! tiovxdlcolorconvert ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)640, height=(int)360;" ! queue ! 
mosaic_0.sink_1
+
+   appsrc do-timestamp=True format=3 block=True name=post_2 ! tiovxdlcolorconvert ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)640, height=(int)360;" ! queue ! mosaic_0.sink_2
+
+   appsrc do-timestamp=True format=3 block=True name=post_3 ! tiovxdlcolorconvert ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)640, height=(int)360;" ! queue ! mosaic_0.sink_3
+
+   tiovxmosaic target=1 background=/tmp/background_0 name=mosaic_0 src::pool-size=4
+   sink_0::startx="<320>" sink_0::starty="<150>" sink_0::widths="<640>" sink_0::heights="<360>"
+   sink_1::startx="<960>" sink_1::starty="<150>" sink_1::widths="<640>" sink_1::heights="<360>"
+   sink_2::startx="<320>" sink_2::starty="<530>" sink_2::widths="<640>" sink_2::heights="<360>"
+   sink_3::startx="<960>" sink_3::starty="<530>" sink_3::widths="<640>" sink_3::heights="<360>"
+   ! capsfilter caps="video/x-raw, format=(string)NV12, width=(int)1920, height=(int)1080;" ! queue ! tiperfoverlay title=Multi Input, Multi Inference ! kmssink sync=false driver-name=tidss force-modesetting=True
+
+
+.. figure:: ../images/edgeai/edgeai_app_multi_input_multi_infer.png
+   :scale: 120
+   :align: center
+
+   Python/C++ application data-flow for multi input multi inference
\ No newline at end of file
diff --git a/source/edgeai/inference_models.rst b/source/edgeai/inference_models.rst
new file mode 100644
index 000000000..2fd0fa613
--- /dev/null
+++ b/source/edgeai/inference_models.rst
@@ -0,0 +1,232 @@
+.. _pub_edgeai_inference_models:
+
+====================
+Deep learning models
+====================
+
+Neural networks run on TI's C7xMMA accelerator using the TI Deep Learning (TIDL)
+software. Development tools are available for different levels of expertise to help
+users implement and optimize deep neural network (DNN) models for the
+|__PART_FAMILY_NAME__| SoC.
+
+Deep learning models and development can be broken down into multiple phases:
+Train, Compile, Evaluate, and Deploy. This flow can be entered at various stages,
+allowing developers to begin wherever best suits them.
+
+.. figure:: ../images/edgeai/DNN-dev-flow-top.png
+   :align: center
+   :scale: 80%
+
+For each task, there is a GUI-based (also called No Code or Low Code)
+tool available within `Edge AI Studio `_ and a
+programmatic / command line tool from the `TI github `_.
+
+In each case, the goal is to acquire or generate a series of
+`"Model Artifacts" <#dnn-directory-structure>`_ that may be deployed to the
+|__PART_FAMILY_NAME__| SoC.
+
+Pretrained Model Evaluation
+===========================
+
+`TI Edge AI Model Zoo `__
+is a large collection of deep learning models validated to work on TI processors
+for Edge AI. It hosts several pre-trained and pre-compiled model artifacts
+for TI hardware.
+
+There are multiple tools that can be used to download or generate model artifacts
+for pretrained models. No/Low code approaches use Edge AI Studio to find or compile
+models from TI's model zoo. Model Analyzer (part of Edge AI Studio) and edgeai-tidl-tools
+expose the programming interfaces used to generate these artifacts, allowing developers to
+understand and experiment with the tools. For these programmatic tools, validated
+out-of-box examples are available.
+
+.. figure:: ../images/edgeai/DNN-modelzoo-eval.png
+   :align: center
+   :scale: 30%
+
+#. https://dev.ti.com/gallery/view/7478209/edgeai-modelselection/ver/1.0.0/
+#. https://dev.ti.com/edgeaisession
+#. https://github.com/TexasInstruments/edgeai-tidl-tools
+#. https://github.com/TexasInstruments/edgeai-tensorlab/tree/main/edgeai-modelzoo
+#. https://github.com/TexasInstruments/edgeai-gst-apps
+
+
+Precompiled model artifacts may be downloaded directly from the TI model zoo with a browser
+or by using the `Model Downloader Tool`_ directly in the SDK.
+Model Downloader Tool
+---------------------
+
+Use the **Model Downloader Tool** in the SDK to download more models on target as shown,
+
+.. code-block:: bash
+
+   /opt/edgeai-gst-apps# ./download_models.sh
+
+The script will launch an interactive menu showing the list of available,
+pre-imported models for download. The downloaded models will be placed
+under the ``/opt/model_zoo/`` directory.
+
+.. figure:: ../images/edgeai/model_downloader.png
+   :align: center
+
+   Model downloader tool menu option to download models
+
+The script can also be used in a non-interactive way as shown below:
+
+.. code-block:: bash
+
+   /opt/edgeai-gst-apps# ./download_models.sh --help
+
+.. _pub_edgeai_model_development_for_beginners:
+
+Model Training Tools
+====================
+
+Models within the TI model zoo are used as a starting point for "Transfer Learning",
+and may be retrained for custom use-cases on the developer's dataset. This is considered
+a "Bring Your Own Data" or BYOD flow. Custom DNN architectures are not supported in BYOD.
+The set of Model Zoo models that are available for retraining is hand-picked to be an
+optimally performing subset of the entire Model Zoo.
+
+.. figure:: ../images/edgeai/DNN-BYOD.png
+   :align: center
+   :scale: 30%
+
+#. https://dev.ti.com/modelcomposer/
+#. https://github.com/TexasInstruments/edgeai-tensorlab/tree/main/edgeai-modelmaker
+
+`Edge AI Studio `__ **Model Composer**
+is an integrated environment that allows end-to-end development of AI applications
+with all the necessary stages, including data collection, annotation, training,
+compilation and deployment of machine learning models. It is bundled with optimal
+models from the model zoo at different performance points for the user to select
+from. It supports the bring-your-own-data (**BYOD**) development flow with TI
+recommended models. Object detection, classification, and segmentation tasks are
+currently supported.
+
+`EdgeAI-ModelMaker `__
+is an end-to-end model development tool that integrates dataset handling,
+model training and model compilation, and provides a simple config file interface
+that is friendly to beginners. Model training and compilation tools are
+automatically installed as part of the ModelMaker setup. This tool can accept
+annotated datasets from another tool (including Model Composer) as explained in its
+documentation. The output of ModelMaker is a trained and compiled DNN model artifact
+package that has all the required side information to be consumed by this SDK.
+Training code is open source and available for modification as necessary.
+.. _pub_edgeai_import_custom_models:
+
+Import Custom Models
+====================
+
+The Processor SDK Linux Edge AI for |__PART_FAMILY_NAME__| supports importing
+pre-trained custom models to run inference on target using the "Bring Your Own Model"
+or BYOM flow.
+
+Most experienced Edge AI developers will use this flow on their own models, since they
+may already have trained models with acceptable accuracy. For best performance, it is
+important that all layers are supported; see the
+`set of supported layers/operators here `__
+
+A developer will need to bring their model in ONNX or TFLITE format, information about
+preprocessing (channel ordering, mean/scale values, input resolution), and a small set of
+images for running calibration (part of quantization) and accuracy validation. Object
+detection models like YOLO-based architectures also require a "PROTOTXT" file describing
+the `detection head's architecture `__.
+
+It is recommended to validate the entire flow with
+simple file-based `examples `__
+provided in `Edge AI TIDL tools `_.
+Note that this compilation or 'import' step of the BYOM flow is handled on an x86
+PC or server. The |__PART_FAMILY_NAME__| SoC cannot natively compile a model for itself.
+
+.. figure:: ../images/edgeai/DNN-BYOM.png
+   :align: center
+   :scale: 30%
+
+#. https://dev.ti.com/edgeaisession
+#. https://github.com/TexasInstruments/edgeai-tidl-tools
+#. https://github.com/TexasInstruments/edgeai-tensorlab/tree/main/edgeai-benchmark
+
+This compilation is an important step, and there are many settings used to alter and
+optimize the resulting model artifacts. Please refer to the
+`TIDL User Options `__
+for the full list of settings. These settings are applicable in each of the BYOM tools,
+since they all use the same underlying Python open source runtimes (ONNX,
+Tensorflow Lite, TVM) with the TIDL backend. Please view the documentation of the
+corresponding tools for their exact usage; common documentation and debugging guidance
+is found in the
+`edgeai-tidl-tools documentation `__.
+This documentation also describes additional features like model optimization,
+quantization-aware training, how to use multiple C7x cores (if applicable), and more.
+
+Model Artifacts and Deployments
+===============================
+
+The SDK makes use of pre-compiled DNN (Deep Neural Network) models and performs
+inference using various OSRTs (open source runtimes) such as TFLite runtime,
+ONNX runtime and Neo AI-DLR (related to TVM).
+
+Model deployment can be performed in several ways; please view the
+:doc:`Sample Apps page <./sample_apps>` for more details on deploying within a
+live application.
+
+In order to infer a DNN, the SDK expects the DNN and its
+associated artifacts in the directory structure below.
+
+.. code-block:: text
+
+   TFL-OD-2010-ssd-mobV2-coco-mlperf-300x300
+   │
+   ├── param.yaml
+   |
+   ├── dataset.yaml
+   │
+   ├── artifacts
+   │   ├── 264_tidl_io_1.bin
+   │   ├── 264_tidl_net.bin
+   │   ├── 264_tidl_net.bin.layer_info.txt
+   │   ├── 264_tidl_net.bin_netLog.txt
+   │   ├── 264_tidl_net.bin.svg
+   │   ├── allowedNode.txt
+   │   └── runtimes_visualization.svg
+   │
+   └── model
+       └── ssd_mobilenet_v2_300_float.tflite
+
+DNN directory structure
+-----------------------
+
+Each DNN must have the following 4 components:
+
+#. **model**: This directory contains the DNN being targeted for inference.
+#. **artifacts**: This directory contains the artifacts generated after
+   compiling the DNN for the SDK. These artifacts can be generated and validated
+   with the simple file-based examples provided in Edge AI TIDL Tools.
+#. **param.yaml**: A configuration file in yaml format that provides basic
+   information about the DNN and the associated pre- and post-processing parameters.
+#. **dataset.yaml**: A configuration file in yaml format needed only for
+   classification and detection. It contains the mapping from output keys to
+   actual labels.
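+
+A quick way to confirm that a deployed model package has all four components is
+to list it on the target; the model name below is the one from the example
+directory structure above and is assumed to be present under ``/opt/model_zoo/``.
+
+.. code-block:: bash
+
+   # List the package layout of a deployed model (name is an example)
+   ls -R /opt/model_zoo/TFL-OD-2010-ssd-mobV2-coco-mlperf-300x300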
.. _pub_edgeai_params:
+
+Param file format
+-----------------
+
+Each DNN has its own pre-process, inference and post-process
+parameters to get the correct output. This information is typically available in
+the training software that was used to train the model. In order to convey this
+information to the SDK in a standardized fashion, we have defined a set of
+parameters that describe these operations. These parameters are in the
+param.yaml file.
+
+Please see sample yaml files for various tasks such as image classification,
+semantic segmentation and object detection in
+`edgeai-benchmark examples `_.
+Descriptions of the various parameters are also in the yaml files. If users want
+to bring their own model to the SDK, then they need to prepare this information
+for the `model compilation / import procedure <#import-custom-models>`__.
+
+
diff --git a/source/edgeai/measure_perf.rst b/source/edgeai/measure_perf.rst
new file mode 100644
index 000000000..e129575f6
--- /dev/null
+++ b/source/edgeai/measure_perf.rst
@@ -0,0 +1,116 @@
+.. _pub_edgeai_perf_viz_tool:
+
+=====================
+Measuring performance
+=====================
+
+There are simple tools to get performance numbers such as core loading, DDR
+bandwidth, HWA loading and GStreamer element latencies on the bash terminal.
+
+GStreamer plugin for Performance measurement
+--------------------------------------------
+
+This custom GStreamer plugin lets users include a non-intrusive element in the
+pipeline which overlays the performance information directly on the output image
+displayed on the screen. The entire processing is done in native NV12 format,
+which makes it convenient to use along with OpTIFlow pipelines.
+For detailed instructions to use the plugin please refer to
+`tiperfoverlay `_
+
+A preview of the performance overlay on the display is as shown,
+
+.. ifconfig:: CONFIG_part_variant in ('AM62AX')
+
+   .. figure:: ../images/edgeai/am62a_perf_overlay.jpg
+      :scale: 30
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('AM68A')
+
+   .. figure:: ../images/edgeai/am68a_perf_overlay.jpg
+      :scale: 30
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('AM69A')
+
+   .. figure:: ../images/edgeai/am69a_perf_overlay.jpg
+      :scale: 30
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('TDA4VM')
+
+   .. figure:: ../images/edgeai/tda4vm_perf_overlay.jpg
+      :scale: 30
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('AM67A')
+
+   .. figure:: ../images/edgeai/am67a_perf_overlay.jpg
+      :scale: 30
+      :align: center
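+
+As a minimal sketch (not taken from the SDK itself), the overlay element can be
+dropped into any NV12 pipeline; the example below assumes a display is connected
+and mirrors how the demo pipelines use it.
+
+.. code-block:: bash
+
+   # Draw the performance overlay on top of a test pattern
+   gst-launch-1.0 videotestsrc ! video/x-raw, format=NV12, width=1920, height=1080 ! \
+       tiperfoverlay overlay-type=graph ! kmssink driver-name=tidss sync=false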
diff --git a/source/edgeai/measure_perf.rst b/source/edgeai/measure_perf.rst
new file mode 100644
index 000000000..e129575f6
--- /dev/null
+++ b/source/edgeai/measure_perf.rst
@@ -0,0 +1,116 @@
+.. _pub_edgeai_perf_viz_tool:
+
+=====================
+Measuring performance
+=====================
+
+There are simple tools to get performance numbers such as core loading, DDR bandwidth,
+HWA loading and GStreamer element latencies on the bash terminal.
+
+GStreamer plugin for performance measurement
+--------------------------------------------
+
+This custom GStreamer plugin allows users to include a non-intrusive element in the
+pipeline which overlays the performance information directly on the output image
+displayed on the screen. The entire processing is done in the native NV12 format,
+which makes it convenient to use along with OpTIFlow pipelines.
+For detailed instructions on using the plugin please refer to
+`tiperfoverlay `_
+
+A preview of the performance overlay on the display is shown below.
+
+.. ifconfig:: CONFIG_part_variant in ('AM62AX')
+
+   .. figure:: ../images/edgeai/am62a_perf_overlay.jpg
+      :scale: 30
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('AM68A')
+
+   .. figure:: ../images/edgeai/am68a_perf_overlay.jpg
+      :scale: 30
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('AM69A')
+
+   .. figure:: ../images/edgeai/am69a_perf_overlay.jpg
+      :scale: 30
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('TDA4VM')
+
+   .. figure:: ../images/edgeai/tda4vm_perf_overlay.jpg
+      :scale: 30
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('AM67A')
+
+   .. figure:: ../images/edgeai/am67a_perf_overlay.jpg
+      :scale: 30
+      :align: center
+
+Perf-stats tool
+---------------
+
+The perf-stats tool is a simple C++ application which prints stats on the terminal
+and updates them every second. To use this tool, compile it and run it in a
+parallel SSH terminal alongside the application. For detailed
+instructions please refer to the
+`perf-stats readme `_
+
+Below is a sample output of the tool.
+
+.. code-block:: text
+
+   Summary of CPU load,
+   ====================
+   CPU: mpu1_0: TOTAL LOAD = 43.81 % ( HWI = 0.74 %, SWI = 0.24 % )
+   CPU: c7x_1: TOTAL LOAD = 12. 0 % ( HWI = 0. 0 %, SWI = 0. 0 % )
+
+   HWA performance statistics,
+   ===========================
+   HWA: MSC0: LOAD = 6.93 % ( 45 MP/s )
+   HWA: MSC1: LOAD = 6.93 % ( 60 MP/s )
+
+   DDR performance statistics,
+   ===========================
+
+   DDR: READ BW: AVG = 1455 MB/s, PEAK = 6140 MB/s
+   DDR: WRITE BW: AVG = 332 MB/s, PEAK = 2138 MB/s
+   DDR: TOTAL BW: AVG = 1787 MB/s, PEAK = 8278 MB/s
+
+Parse GST Tracers
+-----------------
+
+GStreamer has a feature called tracers to get useful statistics such as element-wise
+latency and CPU loading as part of the GST debug logs. These logs are very
+verbose and difficult to interpret in raw format. We provide a simple
+Python script to parse these logs on the fly and display the stats on the
+terminal. For detailed instructions on using the script please refer to the
+`gst-tracers readme `_
+
+Below is a sample output of the script.
+
+.. code-block:: text
+
+   +------------------------------------------------------------------------------+
+   |element               latency      out-latancy      out-fps      frames       |
+   +------------------------------------------------------------------------------+
+   |h264parse0            1.72         6580.05          0            3            |
+   |v4l2h264dec0          329.79       33.29            30           886          |
+   |tiovxmemalloc0        0.11         33.29            30           886          |
+   |capsfilter0           0.08         33.29            30           886          |
+   |split_01              20.37        16.65            60           1770         |
+   |queue0                0.31         33.30            30           885          |
+   |capsfilter1           0.16         33.30            30           885          |
+   |queue1                0.22         33.30            30           885          |
+   |capsfilter3           0.07         33.30            30           885          |
+   |tiovxdlpreproc0       1.63         33.30            30           885          |
+   |capsfilter2           0.43         33.30            30           885          |
+   |tidlinferer0          7.28         33.30            30           885          |
+   |post_0                2.57         33.30            30           885          |
+   |queue2                0.18         33.30            30           885          |
+   |mosaic_0              51.00        33.30            30           883          |
+   |capsfilter4           0.14         33.30            30           883          |
+   |queue3                30.80        33.34            30           882          |
+   |tiperfoverlay0        3.40         33.34            30           882          |
+   +------------------------------------------------------------------------------+
+
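+The tracer logs that this script parses can be generated by exporting the standard
+GStreamer tracer environment variables before launching a pipeline. Below is a minimal
+sketch; the ``latency(flags=element)`` syntax assumes a sufficiently recent GStreamer,
+and the log path is hypothetical.
+
+.. code-block:: python
+
+   import os
+   import subprocess
+
+   # Enable the stock GStreamer latency tracer and route tracer output
+   # into a debug log file (standard GStreamer environment variables).
+   env = dict(os.environ)
+   env["GST_TRACERS"] = "latency(flags=element)"
+   env["GST_DEBUG"] = "GST_TRACER:7"
+   env["GST_DEBUG_FILE"] = "/tmp/gst_traces.log"   # hypothetical log path
+
+   # Launch any pipeline; a trivial one is used here for illustration.
+   subprocess.run(
+       ["gst-launch-1.0", "videotestsrc", "num-buffers=300", "!", "fakesink"],
+       env=env,
+       check=True,
+   )
+   print("Tracer log written to /tmp/gst_traces.log")
+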
diff --git a/source/edgeai/sample_apps.rst b/source/edgeai/sample_apps.rst
new file mode 100644
index 000000000..04a4e1554
--- /dev/null
+++ b/source/edgeai/sample_apps.rst
@@ -0,0 +1,193 @@
+.. _pub_edgeai_sample_apps:
+
+===================
+Edge AI sample apps
+===================
+
+There are various ways to explore running a typical Edge AI use case on the
+|__PART_FAMILY_NAME__| EVM:
+
+   - Try the out-of-box **Edge AI gallery**
+   - Develop Edge AI applications using the **Python and C++ reference applications**
+   - Run optimized end-to-end GStreamer pipelines using **OpTIFlow**
+   - Run optimized end-to-end OpenVX pipelines using **TIOVX Apps**
+
+The SDK is packaged with networks which perform the following three DL tasks:
+
+   - **Image Classification**: Detects the top N most probable classes in the dataset for the given input frame
+   - **Object Detection**: Detects and draws bounding boxes around the objects, and classifies each object into one of the classes in the dataset
+   - **Semantic Segmentation**: Classifies each pixel into one of the classes in the dataset
+
+Out-of-box GUI app
+==================
+
+When the |__PART_FAMILY_NAME__| EVM is powered on with the SD card in place, the
+**Edge AI Gallery** comes up on boot as shown below.
+
+One can connect a USB 2.0 mouse and click on the buttons in the left panel, which
+starts the Edge AI application running the selected DL task. In the background,
+a GStreamer pipeline is launched which reads a compressed video file and runs a
+DL network on the decoded content. The output of the DL inference is overlaid on the
+image and sent to the display. Users can select different DL tasks to execute on the
+compressed video.
+
+There is also a "Custom" button which, when pressed, allows users to select a
+custom input (camera/video file/image) and a custom model available in the
+filesystem. This automatically constructs a GStreamer pipeline with the required
+elements and launches the application.
+
+- For a model to pop up on the GUI, it needs to be present under ``/opt/model_zoo/``
+- For a video file to pop up on the GUI, it needs to be present under ``/opt/edgeai-test-data/videos/``
+- For an image to pop up on the GUI, it needs to be present under ``/opt/edgeai-test-data/images/``
+
+
+.. note::
+
+   Only raw .H264/.H265 video will loop if selected as the input source.
+   Other formats like .MP4/.MOV/.AVI/.MKV will not loop.
+
+.. ifconfig:: CONFIG_part_variant in ('AM62AX')
+
+   .. figure:: ../images/edgeai/am62a_oob_demo.jpg
+      :scale: 30
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('AM68A')
+
+   .. figure:: ../images/edgeai/am68a_oob_demo.jpg
+      :scale: 30
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('AM69A')
+
+   .. figure:: ../images/edgeai/am69a_oob_demo.jpg
+      :scale: 30
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('TDA4VM')
+
+   .. figure:: ../images/edgeai/tda4vm_oob_demo.jpg
+      :scale: 30
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('AM67A')
+
+   .. figure:: ../images/edgeai/am67a_oob_demo.jpg
+      :scale: 30
+      :align: center
+
+.. _pub_edgeai_python_cpp_demos:
+
+Python/C++ apps
+===============
+
+Python-based demos are simple executable scripts written for image
+classification, object detection and semantic segmentation. Demos are
+configured using a YAML file. Details on the configuration file parameters can
+be found in :ref:`pub_edgeai_configuration`.
+
+Sample configuration files for the out-of-the-box demos can be found in
+``edgeai-gst-apps/configs``. This folder also contains a template config file,
+``edgeai-gst-apps/configs/app_config_template.yaml``, which has brief info on each
+configurable parameter (a short config-customization sketch is shown at the end of
+this section).
+
+Here is how a Python-based image classification demo can be run:
+
+.. code-block:: bash
+
+   /opt/edgeai-gst-apps/apps_python# ./app_edgeai.py ../configs/image_classification.yaml
+
+The demo captures input frames from the connected USB camera and passes them
+through pre-processing, inference and post-processing before they are sent to the
+display. Sample outputs of the image classification, object detection and semantic
+segmentation demos are shown below.
+
+.. |logo1| image:: ../images/edgeai/edgeai-image-classify.jpg
+   :align: middle
+.. |logo2| image:: ../images/edgeai/edgeai-object-detect.jpg
+   :align: middle
+.. |logo3| image:: ../images/edgeai/edgeai-sem-seg.jpg
+   :align: middle
+
++---------+---------+---------+
+| |logo1| | |logo2| | |logo3| |
++---------+---------+---------+
+
+To exit the demo press Ctrl+C.
+
+C++ apps are cross-compiled while packaging; they can be directly tested as
+shown below.
+
+.. code-block:: bash
+
+   /opt/edgeai-gst-apps/apps_cpp# ./bin/Release/app_edgeai ../configs/image_classification.yaml
+
+To exit the demo press Ctrl+C.
+
+C++ apps can also be modified and rebuilt on the target using the steps below.
+
+.. code-block:: bash
+
+   /opt/edgeai-gst-apps/apps_cpp# rm -rf build bin lib
+   /opt/edgeai-gst-apps/apps_cpp# mkdir build
+   /opt/edgeai-gst-apps/apps_cpp# cd build
+   /opt/edgeai-gst-apps/apps_cpp/build# cmake ..
+   /opt/edgeai-gst-apps/apps_cpp/build# make -j2
+
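+As a concrete illustration of the config-driven flow, the sketch below edits a copy of
+one of the sample YAML files and launches the Python demo with it. The section and key
+names used (``inputs``, ``source``) are assumptions for illustration; the authoritative
+reference is ``app_config_template.yaml``.
+
+.. code-block:: python
+
+   import subprocess
+
+   import yaml
+
+   SRC = "/opt/edgeai-gst-apps/configs/image_classification.yaml"
+   DST = "/tmp/my_demo.yaml"
+
+   with open(SRC) as f:
+       cfg = yaml.safe_load(f)
+
+   # Assumed layout: an 'inputs' section keyed by input name; point the
+   # first input at a video file instead of a camera (hypothetical path).
+   first_input = next(iter(cfg["inputs"]))
+   cfg["inputs"][first_input]["source"] = \
+       "/opt/edgeai-test-data/videos/video_0000.h264"
+
+   with open(DST, "w") as f:
+       yaml.safe_dump(cfg, f)
+
+   # Launch the Python demo with the customized config
+   subprocess.run(["./app_edgeai.py", DST],
+                  cwd="/opt/edgeai-gst-apps/apps_python", check=True)
+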
+.. _pub_edgeai_optiflow_apps:
+
+OpTIFlow
+========
+
+In Edge AI Python and C++ applications, post-processing and DL inference are done between
+appsink and appsrc application boundaries. This makes the data flow sub-optimal because of
+the unnecessary data format conversions needed to work with open source components.
+
+This is solved by providing a DL-inferer plugin, which calls one of the supported DL runtimes,
+and a post-process plugin which works natively on the NV12 format, avoiding unnecessary
+color format conversions.
+
+Users can write their own pipeline or use the OpTIFlow application to generate and run the
+end-to-end pipeline. The OpTIFlow application shares the same config file as used by
+:ref:`pub_edgeai_python_cpp_demos`.
+
+To directly run the end-to-end pipeline, use the following command.
+
+.. code-block:: bash
+
+   /opt/edgeai-gst-apps/optiflow# ./optiflow.py ../configs/object_detection.yaml
+
+To exit the demo press Ctrl+C.
+
+To just dump the end-to-end pipeline, use the following command.
+
+.. code-block:: bash
+
+   /opt/edgeai-gst-apps/optiflow# ./optiflow.py ../configs/object_detection.yaml -t
+
+
+.. note::
+
+   Python, C++ and OpTIFlow applications are similar by construction and can accept
+   the same config file.
+
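+Since ``-t`` only prints the pipeline, the dumped description can also be launched
+manually. Below is a minimal sketch, assuming the dumped text is a single
+gst-launch-compatible pipeline description with no extra log lines around it.
+
+.. code-block:: python
+
+   import subprocess
+
+   # Ask optiflow to print ("-t") the end-to-end pipeline for a config
+   dump = subprocess.run(
+       ["./optiflow.py", "../configs/object_detection.yaml", "-t"],
+       cwd="/opt/edgeai-gst-apps/optiflow",
+       capture_output=True, text=True, check=True,
+   )
+   pipeline_desc = dump.stdout.strip()
+   print(pipeline_desc)
+
+   # Hand the description to gst-launch-1.0 (assumption: the dump is a
+   # plain pipeline string, exactly as gst-launch expects it)
+   subprocess.run("gst-launch-1.0 " + pipeline_desc, shell=True,
+                  cwd="/opt/edgeai-gst-apps/optiflow", check=True)
+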
+EdgeAI Tiovx Apps
+=================
+
+EdgeAI Tiovx Apps creates and runs optimized end-to-end OpenVX analytics pipelines
+based on a user-defined configuration.
+
+Please visit the `EdgeAI Tiovx Apps wiki `_
+for in-depth documentation.
+
+To run a sample demo:
+
+.. code-block:: bash
+
+   /opt/edgeai-tiovx-apps/# ./bin/Release/edgeai-tiovx-apps-main configs/linux/object_detection.yaml
+
+To exit the demo press Ctrl+C.
+
+To run a multi-input, multi-inference demo:
+
+.. code-block:: bash
+
+   /opt/edgeai-tiovx-apps/# ./bin/Release/edgeai-tiovx-apps-main configs/linux/multi_input_multi_inference.yaml
+
+To exit the demo press Ctrl+C.
diff --git a/source/edgeai/sdk_components.rst b/source/edgeai/sdk_components.rst
new file mode 100644
index 000000000..89ea33b81
--- /dev/null
+++ b/source/edgeai/sdk_components.rst
@@ -0,0 +1,175 @@
+.. _pub_sdk_components:
+
+===============
+SDK Components
+===============
+
+The Processor SDK Linux Edge AI for |__PART_FAMILY_NAME__| mainly comprises three layers:
+
+   - **Edge AI application stack**
+   - **Linux foundations**
+   - **Firmware builder**
+
+Edge AI application stack
+=========================
+
+The Edge AI applications are designed for users to quickly evaluate various
+deep learning networks with real-time inputs on the TI SoCs. Users can
+evaluate pre-imported inference models or build a custom network for deployment
+on the device. Once a network is finalized for performance and accuracy,
+it can be easily integrated into a GStreamer-based application for rapid
+prototyping and deployment. The Edge AI application stack can be split into the
+components below, which integrate well with the underlying foundational Linux
+components and interact with the remote core firmware for acceleration.
+
+The entire Edge AI application stack can be downloaded on a PC and
+cross-compiled for the desired target. For more details on the setup,
+build and install steps please refer to **edgeai-app-stack** on `GitHub `_.
+
+.. figure:: ../images/edgeai/edgeai-app-stack.jpg
+   :scale: 65
+   :align: center
+
+.. _pub_edgeai_gst_apps:
+
+edgeai-gst-apps
+---------------
+These are plug-and-play deep learning applications which support running open
+source runtime frameworks such as TFLite, ONNX and Neo-AI DLR with a variety of
+input and output configurations.
+
+The latest source code with fixes can be pulled from `TI Edge AI GStreamer apps `_.
+
+.. _pub_edgeai_dl_inferer:
+
+edgeai-dl-inferer
+-----------------
+This repo provides an interface to the TI OSRT library, whose APIs can be used
+standalone or with an application like edgeai-gst-apps. It also provides the source
+of the NV12 post-processing library and utils, which are used with some custom
+GStreamer plugins.
+
+The latest source code with fixes can be pulled from `TI Edge AI DL Inferer `_.
+
+.. _pub_edgeai_gst_plugins:
+
+edgeai-gst-plugins
+------------------
+This repo provides the source of custom GStreamer plugins which help offload
+tasks to the hardware accelerators with the help of edgeai-tiovx-modules.
+
+Source code and documentation: `TI Edge AI GStreamer plugins `_
+
+edgeai-app-utils
+----------------
+This repo provides utility APIs for NV12 drawing and font rendering, reporting
+MPU and DDR performance, and ARM Neon-optimized kernels for color conversion,
+pre-processing and scaling.
+
+Source code and documentation: `TI Edge AI Apps utils `_
+
+.. _pub_edgeai_tiovx_modules:
+
+edgeai-tiovx-modules
+--------------------
+This repo provides OpenVX modules which help access the underlying hardware
+accelerators in the SoC and serve as a bridge between GStreamer
+custom elements and the underlying OpenVX custom kernels.
+
+Source code and documentation: `TI Edge AI TIOVX modules `_
+
+.. _pub_edgeai_tiovx_kernels:
+
+edgeai-tiovx-kernels
+--------------------
+This repo provides OpenVX kernels which help accelerate color conversion,
+DL pre-processing and DL post-processing using the ARMv8 NEON accelerator.
+
+Source code and documentation: `TI Edge AI TIOVX kernels `_
+
+.. _pub_edgeai_tiovx_apps:
+
+edgeai-tiovx-apps
+-----------------
+
+This repo provides a layer on top of OpenVX to easily create an OpenVX
+graph and connect it to V4L2 blocks to realize various complex
+use cases.
+
+Source code and documentation: `TI Edge AI TIOVX Apps `_
+
+.. _pub_edgeai_modelzoo:
+
+Foundation Linux
+================
+The Edge AI app stack is built on top of foundational Linux components which
+include U-Boot, the Linux kernel, device drivers, multimedia codecs, GPU drivers
+and a lot more. Foundation Linux is built using the Yocto Project, and the sources
+are publicly available to build the entire image completely from scratch. We also
+provide an installer which packages a pre-built Linux filesystem, a board support
+package and tools to customize the Linux layers of the software stack.
+
+.. ifconfig:: CONFIG_part_variant in ('AM62AX')
+
+   Click `AM62AX Linux Foundation components `_
+   to explore more!
+
+.. ifconfig:: CONFIG_part_variant in ('AM67A')
+
+   Click `AM67A Linux Foundation components `_
+   to explore more!
+
+.. ifconfig:: CONFIG_part_variant in ('AM68A')
+
+   Click `AM68A Linux Foundation components `_
+   to explore more!
+
+.. ifconfig:: CONFIG_part_variant in ('AM69A')
+
+   Click `AM69A Linux Foundation components `_
+   to explore more!
+
+.. ifconfig:: CONFIG_part_variant in ('TDA4VM')
+
+   Click `Processor SDK Linux J721E Foundation Components `_
+   to explore more!
+
+Firmware builder
+================
+
+The |__PART_FAMILY_NAME__| firmware builder package is required only when dealing
+with low-level software components such as remote core firmware, drivers for
+hardware accelerators, system memory map changes, etc. It is not required for
+user space application development.
+
+.. ifconfig:: CONFIG_part_variant in ('AM62AX')
+
+   Access to `FIRMWARE-BUILDER-AM62AX `_
+   is provided via MySecureSW and requires a login.
+
+   Click `AM62AX REQUEST LINK `_
+   to request access.
+
+.. ifconfig:: CONFIG_part_variant in ('AM68A')
+
+   Access to `FIRMWARE-BUILDER-AM68A `_
+   is provided via MySecureSW and requires a login.
+
+   Click `AM68A REQUEST LINK `_
+   to request access.
+
+.. ifconfig:: CONFIG_part_variant in ('AM69A')
+
+   Access to `FIRMWARE-BUILDER-AM69A `_
+   is provided via MySecureSW and requires a login.
+
+   Click `AM69A REQUEST LINK `_
+   to request access.
+
+.. ifconfig:: CONFIG_part_variant in ('TDA4VM')
+
+   Access to `FIRMWARE-BUILDER-TDA4VM `_
+   is provided via MySecureSW and requires a login.
+
+   Click `TDA4VM REQUEST LINK `_
+   to request access.
diff --git a/source/edgeai/sdk_overview.rst b/source/edgeai/sdk_overview.rst
new file mode 100644
index 000000000..050ac7b9f
--- /dev/null
+++ b/source/edgeai/sdk_overview.rst
@@ -0,0 +1,80 @@
+.. _pub_sdk_overview:
+
+========
+Overview
+========
+
+**Welcome to Processor SDK Linux Edge AI for** |__PART_FAMILY_NAME__| **!**
+
+The SDK provides software and tools to let users effectively balance
+deep learning performance with system power and cost on Texas Instruments
+processors for edge AI applications. We offer a practical embedded inference
+solution for next-generation vehicles, smart cameras, edge AI boxes, and
+autonomous machines and robots. In addition to general purpose microprocessors,
+|__PART_FAMILY_NAME__| has integrated microcontrollers, a DSP, and accelerators
+for neural network, image, vision, and multimedia processing. With a few simple
+steps, one can run high-performance computer vision and deep learning demos
+using a live camera and display.
+
+.. |pic1| image:: ../images/edgeai/edgeai-overview-image1.jpg
+   :align: middle
+.. |pic2| image:: ../images/edgeai/edgeai-overview-image2.jpg
+   :align: middle
+.. |pic3| image:: ../images/edgeai/edgeai-overview-image3.jpg
+   :align: middle
+
++--------+--------+--------+
+| |pic1| | |pic2| | |pic3| |
++--------+--------+--------+
+
+.. figure:: ../images/edgeai/edgeai-sdk-feature.png
+   :scale: 50
+   :align: center
+
+   Processor SDK Linux Edge AI for |__PART_FAMILY_NAME__| feature overview
+
+
+The SDK also enables an interplay of multiple open source components such as
+GStreamer, OpenVX and OpenCV, and deep learning runtimes such as TFLite, ONNX and
+Neo-AI DLR. The reference applications showcase perception-based examples such
+as image classification, object detection and semantic segmentation in both
+Python and C++ variants. The SDK supports edit-build-debug cycles directly on
+the target and also on a PC to cross-compile and build the applications.
+
+.. figure:: ../images/edgeai/open-src-components.png
+   :scale: 50
+   :align: center
+
+   Industry Standard Components supported in Processor SDK Linux Edge AI for |__PART_FAMILY_NAME__|
+
+The SDK mainly comprises three parts, as shown in the illustration below.
+The Edge AI application stack is used to run analytics applications with
+real-time inputs/outputs. The foundational Linux components provide the OS:
+U-Boot, kernel, filesystem, Linux drivers, and firmware for the remote cores
+and hardware accelerators. The firmware builder is used to rebuild the remote
+core firmware when low-level changes are needed.
+
+.. ifconfig:: CONFIG_part_variant in ('AM62AX')
+
+   .. figure:: ../images/edgeai/sdk_overview_am62a.jpg
+      :scale: 75
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('AM68A')
+
+   .. figure:: ../images/edgeai/sdk_overview_am68a.jpg
+      :scale: 75
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('AM69A')
+
+   .. figure:: ../images/edgeai/sdk_overview_am69a.jpg
+      :scale: 75
+      :align: center
+
+.. ifconfig:: CONFIG_part_variant in ('TDA4VM')
+
+   .. 
figure:: ../images/edgeai/sdk_overview_tda4vm.jpg + :scale: 75 + :align: center + +To get started with the setup click the **Next** button. diff --git a/source/images/edgeai/DNN-BYOD.png b/source/images/edgeai/DNN-BYOD.png new file mode 100644 index 000000000..a45a35f91 Binary files /dev/null and b/source/images/edgeai/DNN-BYOD.png differ diff --git a/source/images/edgeai/DNN-BYOM.png b/source/images/edgeai/DNN-BYOM.png new file mode 100644 index 000000000..7be1c6ea0 Binary files /dev/null and b/source/images/edgeai/DNN-BYOM.png differ diff --git a/source/images/edgeai/DNN-dev-flow-top.png b/source/images/edgeai/DNN-dev-flow-top.png new file mode 100644 index 000000000..baf9a4296 Binary files /dev/null and b/source/images/edgeai/DNN-dev-flow-top.png differ diff --git a/source/images/edgeai/DNN-model-dev-flows.vsdx b/source/images/edgeai/DNN-model-dev-flows.vsdx new file mode 100644 index 000000000..38c3fc720 Binary files /dev/null and b/source/images/edgeai/DNN-model-dev-flows.vsdx differ diff --git a/source/images/edgeai/DNN-modelzoo-eval.png b/source/images/edgeai/DNN-modelzoo-eval.png new file mode 100644 index 000000000..f8383871c Binary files /dev/null and b/source/images/edgeai/DNN-modelzoo-eval.png differ diff --git a/source/images/edgeai/E2e.jpg b/source/images/edgeai/E2e.jpg new file mode 100644 index 000000000..608e10bde Binary files /dev/null and b/source/images/edgeai/E2e.jpg differ diff --git a/source/images/edgeai/TDA4VM-SK-SD-Boot.png b/source/images/edgeai/TDA4VM-SK-SD-Boot.png new file mode 100644 index 000000000..0e5535c41 Binary files /dev/null and b/source/images/edgeai/TDA4VM-SK-SD-Boot.png differ diff --git a/source/images/edgeai/am62a-sk_exp_hdr.png b/source/images/edgeai/am62a-sk_exp_hdr.png new file mode 100644 index 000000000..33712bf75 Binary files /dev/null and b/source/images/edgeai/am62a-sk_exp_hdr.png differ diff --git a/source/images/edgeai/am62a_bootpins.jpg b/source/images/edgeai/am62a_bootpins.jpg new file mode 100644 index 000000000..5ae0d150b Binary files /dev/null and b/source/images/edgeai/am62a_bootpins.jpg differ diff --git a/source/images/edgeai/am62a_evm.jpg b/source/images/edgeai/am62a_evm.jpg new file mode 100644 index 000000000..4ebb7325a Binary files /dev/null and b/source/images/edgeai/am62a_evm.jpg differ diff --git a/source/images/edgeai/am62a_mini_fusion.png b/source/images/edgeai/am62a_mini_fusion.png new file mode 100644 index 000000000..11b70464f Binary files /dev/null and b/source/images/edgeai/am62a_mini_fusion.png differ diff --git a/source/images/edgeai/am62a_oob_banner.jpg b/source/images/edgeai/am62a_oob_banner.jpg new file mode 100644 index 000000000..c26822ee4 Binary files /dev/null and b/source/images/edgeai/am62a_oob_banner.jpg differ diff --git a/source/images/edgeai/am62a_oob_demo.jpg b/source/images/edgeai/am62a_oob_demo.jpg new file mode 100644 index 000000000..1087275e0 Binary files /dev/null and b/source/images/edgeai/am62a_oob_demo.jpg differ diff --git a/source/images/edgeai/am62a_perf_overlay.jpg b/source/images/edgeai/am62a_perf_overlay.jpg new file mode 100644 index 000000000..3ac1b4efb Binary files /dev/null and b/source/images/edgeai/am62a_perf_overlay.jpg differ diff --git a/source/images/edgeai/am62a_rpi.jpg b/source/images/edgeai/am62a_rpi.jpg new file mode 100644 index 000000000..bc3cc44c0 Binary files /dev/null and b/source/images/edgeai/am62a_rpi.jpg differ diff --git a/source/images/edgeai/am67a_bootpins.jpg b/source/images/edgeai/am67a_bootpins.jpg new file mode 100644 index 000000000..647f8acc5 Binary 
files /dev/null and b/source/images/edgeai/am67a_bootpins.jpg differ diff --git a/source/images/edgeai/am67a_csi_camera_connection.jpg b/source/images/edgeai/am67a_csi_camera_connection.jpg new file mode 100644 index 000000000..0bd43eea6 Binary files /dev/null and b/source/images/edgeai/am67a_csi_camera_connection.jpg differ diff --git a/source/images/edgeai/am67a_evm.jpg b/source/images/edgeai/am67a_evm.jpg new file mode 100644 index 000000000..699f1e2bb Binary files /dev/null and b/source/images/edgeai/am67a_evm.jpg differ diff --git a/source/images/edgeai/am67a_mini_fusion.png b/source/images/edgeai/am67a_mini_fusion.png new file mode 100644 index 000000000..9c6f24ff9 Binary files /dev/null and b/source/images/edgeai/am67a_mini_fusion.png differ diff --git a/source/images/edgeai/am67a_oob_banner.jpg b/source/images/edgeai/am67a_oob_banner.jpg new file mode 100755 index 000000000..53849f619 Binary files /dev/null and b/source/images/edgeai/am67a_oob_banner.jpg differ diff --git a/source/images/edgeai/am67a_oob_demo.jpg b/source/images/edgeai/am67a_oob_demo.jpg new file mode 100755 index 000000000..1e247c0ab Binary files /dev/null and b/source/images/edgeai/am67a_oob_demo.jpg differ diff --git a/source/images/edgeai/am67a_perf_overlay.jpg b/source/images/edgeai/am67a_perf_overlay.jpg new file mode 100755 index 000000000..bbae42db1 Binary files /dev/null and b/source/images/edgeai/am67a_perf_overlay.jpg differ diff --git a/source/images/edgeai/am67a_rpi.jpg b/source/images/edgeai/am67a_rpi.jpg new file mode 100644 index 000000000..d068b5080 Binary files /dev/null and b/source/images/edgeai/am67a_rpi.jpg differ diff --git a/source/images/edgeai/am68a_bootpins.jpg b/source/images/edgeai/am68a_bootpins.jpg new file mode 100644 index 000000000..7fc51366e Binary files /dev/null and b/source/images/edgeai/am68a_bootpins.jpg differ diff --git a/source/images/edgeai/am68a_csi_camera_connection.png b/source/images/edgeai/am68a_csi_camera_connection.png new file mode 100644 index 000000000..fba7ed438 Binary files /dev/null and b/source/images/edgeai/am68a_csi_camera_connection.png differ diff --git a/source/images/edgeai/am68a_evm.jpg b/source/images/edgeai/am68a_evm.jpg new file mode 100644 index 000000000..abcbc2833 Binary files /dev/null and b/source/images/edgeai/am68a_evm.jpg differ diff --git a/source/images/edgeai/am68a_mini_fusion.png b/source/images/edgeai/am68a_mini_fusion.png new file mode 100644 index 000000000..0b7466b60 Binary files /dev/null and b/source/images/edgeai/am68a_mini_fusion.png differ diff --git a/source/images/edgeai/am68a_oob_banner.jpg b/source/images/edgeai/am68a_oob_banner.jpg new file mode 100644 index 000000000..41971b8af Binary files /dev/null and b/source/images/edgeai/am68a_oob_banner.jpg differ diff --git a/source/images/edgeai/am68a_oob_demo.jpg b/source/images/edgeai/am68a_oob_demo.jpg new file mode 100644 index 000000000..b5ae1ba69 Binary files /dev/null and b/source/images/edgeai/am68a_oob_demo.jpg differ diff --git a/source/images/edgeai/am68a_perf_overlay.jpg b/source/images/edgeai/am68a_perf_overlay.jpg new file mode 100644 index 000000000..5fd0e13fd Binary files /dev/null and b/source/images/edgeai/am68a_perf_overlay.jpg differ diff --git a/source/images/edgeai/am68a_rpi.jpg b/source/images/edgeai/am68a_rpi.jpg new file mode 100644 index 000000000..e65b66d19 Binary files /dev/null and b/source/images/edgeai/am68a_rpi.jpg differ diff --git a/source/images/edgeai/am68sk_exp_hdr.png b/source/images/edgeai/am68sk_exp_hdr.png new file mode 100644 index 
000000000..35f5dddfd Binary files /dev/null and b/source/images/edgeai/am68sk_exp_hdr.png differ diff --git a/source/images/edgeai/am69a_bootpins.jpg b/source/images/edgeai/am69a_bootpins.jpg new file mode 100644 index 000000000..1bf4c3ea2 Binary files /dev/null and b/source/images/edgeai/am69a_bootpins.jpg differ diff --git a/source/images/edgeai/am69a_csi_camera_connection.png b/source/images/edgeai/am69a_csi_camera_connection.png new file mode 100644 index 000000000..70d6ac6f1 Binary files /dev/null and b/source/images/edgeai/am69a_csi_camera_connection.png differ diff --git a/source/images/edgeai/am69a_evm.jpg b/source/images/edgeai/am69a_evm.jpg new file mode 100644 index 000000000..d0a0af48a Binary files /dev/null and b/source/images/edgeai/am69a_evm.jpg differ diff --git a/source/images/edgeai/am69a_mini_fusion.png b/source/images/edgeai/am69a_mini_fusion.png new file mode 100644 index 000000000..5268739f5 Binary files /dev/null and b/source/images/edgeai/am69a_mini_fusion.png differ diff --git a/source/images/edgeai/am69a_oob_banner.jpg b/source/images/edgeai/am69a_oob_banner.jpg new file mode 100755 index 000000000..b9dbe3a57 Binary files /dev/null and b/source/images/edgeai/am69a_oob_banner.jpg differ diff --git a/source/images/edgeai/am69a_oob_demo.jpg b/source/images/edgeai/am69a_oob_demo.jpg new file mode 100644 index 000000000..4a2a0c832 Binary files /dev/null and b/source/images/edgeai/am69a_oob_demo.jpg differ diff --git a/source/images/edgeai/am69a_perf_overlay.jpg b/source/images/edgeai/am69a_perf_overlay.jpg new file mode 100644 index 000000000..be2c89cb6 Binary files /dev/null and b/source/images/edgeai/am69a_perf_overlay.jpg differ diff --git a/source/images/edgeai/am69a_rpi.jpg b/source/images/edgeai/am69a_rpi.jpg new file mode 100644 index 000000000..cda3966ea Binary files /dev/null and b/source/images/edgeai/am69a_rpi.jpg differ diff --git a/source/images/edgeai/am69sk_exp_hdr.png b/source/images/edgeai/am69sk_exp_hdr.png new file mode 100644 index 000000000..4019f752c Binary files /dev/null and b/source/images/edgeai/am69sk_exp_hdr.png differ diff --git a/source/images/edgeai/balena_etcher.png b/source/images/edgeai/balena_etcher.png new file mode 100644 index 000000000..37518a24b Binary files /dev/null and b/source/images/edgeai/balena_etcher.png differ diff --git a/source/images/edgeai/board_connections_tda4vm_evm.jpg b/source/images/edgeai/board_connections_tda4vm_evm.jpg new file mode 100644 index 000000000..df7928699 Binary files /dev/null and b/source/images/edgeai/board_connections_tda4vm_evm.jpg differ diff --git a/source/images/edgeai/board_connections_tda4vm_sk.jpg b/source/images/edgeai/board_connections_tda4vm_sk.jpg new file mode 100644 index 000000000..bfff3ff7e Binary files /dev/null and b/source/images/edgeai/board_connections_tda4vm_sk.jpg differ diff --git a/source/images/edgeai/boot_wallpaper.jpg b/source/images/edgeai/boot_wallpaper.jpg new file mode 100644 index 000000000..ec67b1494 Binary files /dev/null and b/source/images/edgeai/boot_wallpaper.jpg differ diff --git a/source/images/edgeai/datasheet_optiflow_pipeline1.png b/source/images/edgeai/datasheet_optiflow_pipeline1.png new file mode 100644 index 000000000..645ad3fc6 Binary files /dev/null and b/source/images/edgeai/datasheet_optiflow_pipeline1.png differ diff --git a/source/images/edgeai/datasheet_optiflow_pipeline2.png b/source/images/edgeai/datasheet_optiflow_pipeline2.png new file mode 100644 index 000000000..5f4aec30d Binary files /dev/null and 
b/source/images/edgeai/datasheet_optiflow_pipeline2.png differ diff --git a/source/images/edgeai/dof_dataflow.png b/source/images/edgeai/dof_dataflow.png new file mode 100644 index 000000000..37b38c880 Binary files /dev/null and b/source/images/edgeai/dof_dataflow.png differ diff --git a/source/images/edgeai/edge_ai_demos_CPP_Demo_Data_Flow.png b/source/images/edgeai/edge_ai_demos_CPP_Demo_Data_Flow.png new file mode 100644 index 000000000..4fdfb28d9 Binary files /dev/null and b/source/images/edgeai/edge_ai_demos_CPP_Demo_Data_Flow.png differ diff --git a/source/images/edgeai/edge_ai_demos_Python_Demo_Data_Flow.png b/source/images/edgeai/edge_ai_demos_Python_Demo_Data_Flow.png new file mode 100644 index 000000000..9d14585f0 Binary files /dev/null and b/source/images/edgeai/edge_ai_demos_Python_Demo_Data_Flow.png differ diff --git a/source/images/edgeai/edgeai-app-stack.jpg b/source/images/edgeai/edgeai-app-stack.jpg new file mode 100644 index 000000000..6629cf150 Binary files /dev/null and b/source/images/edgeai/edgeai-app-stack.jpg differ diff --git a/source/images/edgeai/edgeai-image-classify.jpg b/source/images/edgeai/edgeai-image-classify.jpg new file mode 100644 index 000000000..e89b888af Binary files /dev/null and b/source/images/edgeai/edgeai-image-classify.jpg differ diff --git a/source/images/edgeai/edgeai-object-detect.jpg b/source/images/edgeai/edgeai-object-detect.jpg new file mode 100644 index 000000000..80bce5f45 Binary files /dev/null and b/source/images/edgeai/edgeai-object-detect.jpg differ diff --git a/source/images/edgeai/edgeai-overview-image1.jpg b/source/images/edgeai/edgeai-overview-image1.jpg new file mode 100644 index 000000000..d5f55221c Binary files /dev/null and b/source/images/edgeai/edgeai-overview-image1.jpg differ diff --git a/source/images/edgeai/edgeai-overview-image2.jpg b/source/images/edgeai/edgeai-overview-image2.jpg new file mode 100644 index 000000000..7a2e8bbf9 Binary files /dev/null and b/source/images/edgeai/edgeai-overview-image2.jpg differ diff --git a/source/images/edgeai/edgeai-overview-image3.jpg b/source/images/edgeai/edgeai-overview-image3.jpg new file mode 100644 index 000000000..bbf27eb8a Binary files /dev/null and b/source/images/edgeai/edgeai-overview-image3.jpg differ diff --git a/source/images/edgeai/edgeai-sdk-components.png b/source/images/edgeai/edgeai-sdk-components.png new file mode 100644 index 000000000..fa9fbe3ea Binary files /dev/null and b/source/images/edgeai/edgeai-sdk-components.png differ diff --git a/source/images/edgeai/edgeai-sdk-feature.png b/source/images/edgeai/edgeai-sdk-feature.png new file mode 100644 index 000000000..9a2a45ab7 Binary files /dev/null and b/source/images/edgeai/edgeai-sdk-feature.png differ diff --git a/source/images/edgeai/edgeai-sdk-overview.png b/source/images/edgeai/edgeai-sdk-overview.png new file mode 100644 index 000000000..fba1fd54f Binary files /dev/null and b/source/images/edgeai/edgeai-sdk-overview.png differ diff --git a/source/images/edgeai/edgeai-sdk-programming-env.png b/source/images/edgeai/edgeai-sdk-programming-env.png new file mode 100644 index 000000000..ae1540213 Binary files /dev/null and b/source/images/edgeai/edgeai-sdk-programming-env.png differ diff --git a/source/images/edgeai/edgeai-sdk-roadmap.png b/source/images/edgeai/edgeai-sdk-roadmap.png new file mode 100644 index 000000000..bba800d5c Binary files /dev/null and b/source/images/edgeai/edgeai-sdk-roadmap.png differ diff --git a/source/images/edgeai/edgeai-sem-seg.jpg b/source/images/edgeai/edgeai-sem-seg.jpg new 
file mode 100755 index 000000000..fdd6bf93d Binary files /dev/null and b/source/images/edgeai/edgeai-sem-seg.jpg differ diff --git a/source/images/edgeai/edgeai_app_image_classification.png b/source/images/edgeai/edgeai_app_image_classification.png new file mode 100644 index 000000000..9f4481a8c Binary files /dev/null and b/source/images/edgeai/edgeai_app_image_classification.png differ diff --git a/source/images/edgeai/edgeai_app_multi_input_multi_infer.png b/source/images/edgeai/edgeai_app_multi_input_multi_infer.png new file mode 100644 index 000000000..356717b88 Binary files /dev/null and b/source/images/edgeai/edgeai_app_multi_input_multi_infer.png differ diff --git a/source/images/edgeai/edgeai_app_object_detection.png b/source/images/edgeai/edgeai_app_object_detection.png new file mode 100644 index 000000000..f8863f330 Binary files /dev/null and b/source/images/edgeai/edgeai_app_object_detection.png differ diff --git a/source/images/edgeai/edgeai_app_semantic_segmentation.png b/source/images/edgeai/edgeai_app_semantic_segmentation.png new file mode 100644 index 000000000..a0020db52 Binary files /dev/null and b/source/images/edgeai/edgeai_app_semantic_segmentation.png differ diff --git a/source/images/edgeai/edgeai_app_single_input_multi_infer.png b/source/images/edgeai/edgeai_app_single_input_multi_infer.png new file mode 100644 index 000000000..b3a431223 Binary files /dev/null and b/source/images/edgeai/edgeai_app_single_input_multi_infer.png differ diff --git a/source/images/edgeai/favicon.ico b/source/images/edgeai/favicon.ico new file mode 100644 index 000000000..d14b5d986 Binary files /dev/null and b/source/images/edgeai/favicon.ico differ diff --git a/source/images/edgeai/j721esk-ap.png b/source/images/edgeai/j721esk-ap.png new file mode 100644 index 000000000..f01438d13 Binary files /dev/null and b/source/images/edgeai/j721esk-ap.png differ diff --git a/source/images/edgeai/j721esk_exp_hdr.png b/source/images/edgeai/j721esk_exp_hdr.png new file mode 100644 index 000000000..e3a66ace2 Binary files /dev/null and b/source/images/edgeai/j721esk_exp_hdr.png differ diff --git a/source/images/edgeai/j722s-evm-exp_hdr.png b/source/images/edgeai/j722s-evm-exp_hdr.png new file mode 100644 index 000000000..533069c08 Binary files /dev/null and b/source/images/edgeai/j722s-evm-exp_hdr.png differ diff --git a/source/images/edgeai/model_downloader.png b/source/images/edgeai/model_downloader.png new file mode 100644 index 000000000..8f029a88a Binary files /dev/null and b/source/images/edgeai/model_downloader.png differ diff --git a/source/images/edgeai/open-src-components.png b/source/images/edgeai/open-src-components.png new file mode 100644 index 000000000..0bc8e05b0 Binary files /dev/null and b/source/images/edgeai/open-src-components.png differ diff --git a/source/images/edgeai/optiflow_image_classification.png b/source/images/edgeai/optiflow_image_classification.png new file mode 100644 index 000000000..0211ed796 Binary files /dev/null and b/source/images/edgeai/optiflow_image_classification.png differ diff --git a/source/images/edgeai/optiflow_multi_input_multi_infer.png b/source/images/edgeai/optiflow_multi_input_multi_infer.png new file mode 100644 index 000000000..15fbf9505 Binary files /dev/null and b/source/images/edgeai/optiflow_multi_input_multi_infer.png differ diff --git a/source/images/edgeai/optiflow_object_detection.png b/source/images/edgeai/optiflow_object_detection.png new file mode 100644 index 000000000..908b488ce Binary files /dev/null and 
b/source/images/edgeai/optiflow_object_detection.png differ diff --git a/source/images/edgeai/optiflow_semantic_segmentation.png b/source/images/edgeai/optiflow_semantic_segmentation.png new file mode 100644 index 000000000..7baa862a5 Binary files /dev/null and b/source/images/edgeai/optiflow_semantic_segmentation.png differ diff --git a/source/images/edgeai/optiflow_single_input_multi_infer.png b/source/images/edgeai/optiflow_single_input_multi_infer.png new file mode 100644 index 000000000..7527871ec Binary files /dev/null and b/source/images/edgeai/optiflow_single_input_multi_infer.png differ diff --git a/source/images/edgeai/sde_dataflow.png b/source/images/edgeai/sde_dataflow.png new file mode 100644 index 000000000..edc3993f2 Binary files /dev/null and b/source/images/edgeai/sde_dataflow.png differ diff --git a/source/images/edgeai/sdk_overview_am62a.jpg b/source/images/edgeai/sdk_overview_am62a.jpg new file mode 100644 index 000000000..c1a8c640e Binary files /dev/null and b/source/images/edgeai/sdk_overview_am62a.jpg differ diff --git a/source/images/edgeai/sdk_overview_am68a.jpg b/source/images/edgeai/sdk_overview_am68a.jpg new file mode 100644 index 000000000..aacd83ec5 Binary files /dev/null and b/source/images/edgeai/sdk_overview_am68a.jpg differ diff --git a/source/images/edgeai/sdk_overview_am69a.jpg b/source/images/edgeai/sdk_overview_am69a.jpg new file mode 100644 index 000000000..8fb3e7161 Binary files /dev/null and b/source/images/edgeai/sdk_overview_am69a.jpg differ diff --git a/source/images/edgeai/sdk_overview_tda4vm.jpg b/source/images/edgeai/sdk_overview_tda4vm.jpg new file mode 100644 index 000000000..923306602 Binary files /dev/null and b/source/images/edgeai/sdk_overview_tda4vm.jpg differ diff --git a/source/images/edgeai/tda4vm_csi_camera_connection.png b/source/images/edgeai/tda4vm_csi_camera_connection.png new file mode 100644 index 000000000..9e8505cdd Binary files /dev/null and b/source/images/edgeai/tda4vm_csi_camera_connection.png differ diff --git a/source/images/edgeai/tda4vm_oob_banner.jpg b/source/images/edgeai/tda4vm_oob_banner.jpg new file mode 100644 index 000000000..b418bee3a Binary files /dev/null and b/source/images/edgeai/tda4vm_oob_banner.jpg differ diff --git a/source/images/edgeai/tda4vm_oob_demo.jpg b/source/images/edgeai/tda4vm_oob_demo.jpg new file mode 100644 index 000000000..1e77a86a1 Binary files /dev/null and b/source/images/edgeai/tda4vm_oob_demo.jpg differ diff --git a/source/images/edgeai/tda4vm_perf_overlay.jpg b/source/images/edgeai/tda4vm_perf_overlay.jpg new file mode 100644 index 000000000..cc17ac296 Binary files /dev/null and b/source/images/edgeai/tda4vm_perf_overlay.jpg differ diff --git a/source/images/edgeai/tda4vm_rpi_camera_connection.jpg b/source/images/edgeai/tda4vm_rpi_camera_connection.jpg new file mode 100644 index 000000000..d27d50055 Binary files /dev/null and b/source/images/edgeai/tda4vm_rpi_camera_connection.jpg differ diff --git a/source/images/edgeai/vs_code.png b/source/images/edgeai/vs_code.png new file mode 100644 index 000000000..a8aa2f078 Binary files /dev/null and b/source/images/edgeai/vs_code.png differ diff --git a/source/images/edgeai/wifi-oob-iw-command.png b/source/images/edgeai/wifi-oob-iw-command.png new file mode 100644 index 000000000..85d23a3ec Binary files /dev/null and b/source/images/edgeai/wifi-oob-iw-command.png differ