diff --git a/.readthedocs.yml b/.readthedocs.yml
new file mode 100644
index 00000000..740901be
--- /dev/null
+++ b/.readthedocs.yml
@@ -0,0 +1,15 @@
+version: 2
+
+sphinx:
+ builder: html
+ configuration: source/conf.py
+ fail_on_warning: true
+
+build:
+ os: ubuntu-22.04
+ tools:
+ python: "3.11"
+
+python:
+ install:
+ - requirements: requirements.txt
\ No newline at end of file
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
deleted file mode 100644
index 8372debb..00000000
--- a/azure-pipelines.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-# https://docs.microsoft.com/azure/devops/pipelines/languages/python
-pool:
- vmImage: 'ubuntu-16.04' # other options: 'macOS-10.13', 'vs2017-win2016'
-
-trigger:
-- master
-
-pr:
-- master
-
-steps:
-- task: UsePythonVersion@0
- inputs:
- versionSpec: '3.6'
- architecture: 'x64'
-
-- script: python -m pip install --upgrade pip setuptools wheel
- displayName: 'Install and upgrade pip'
-
-- script: pip install -r requirements.txt
- displayName: 'Install Requirements'
-
-- script: make html
- displayName: 'Compile Docs'
-
-- task: ArchiveFiles@2
- inputs:
- rootFolderOrFile: build/
- includeRootFolder: false
- archiveType: 'zip'
- archiveFile: 'build.zip'
- replaceExistingArchive: true
diff --git a/readthedocs.yml b/readthedocs.yml
deleted file mode 100644
index d079a58e..00000000
--- a/readthedocs.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-requirements_file: requirements.txt
-
-sphinx:
- builder: html
- configuration: source/docs/conf.py
- fail_on_warning: false
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 045afff5..3873f023 100644
Binary files a/requirements.txt and b/requirements.txt differ
diff --git a/source/azure-pipelines.yml b/source/azure-pipelines.yml
deleted file mode 100644
index 8372debb..00000000
--- a/source/azure-pipelines.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-# https://docs.microsoft.com/azure/devops/pipelines/languages/python
-pool:
- vmImage: 'ubuntu-16.04' # other options: 'macOS-10.13', 'vs2017-win2016'
-
-trigger:
-- master
-
-pr:
-- master
-
-steps:
-- task: UsePythonVersion@0
- inputs:
- versionSpec: '3.6'
- architecture: 'x64'
-
-- script: python -m pip install --upgrade pip setuptools wheel
- displayName: 'Install and upgrade pip'
-
-- script: pip install -r requirements.txt
- displayName: 'Install Requirements'
-
-- script: make html
- displayName: 'Compile Docs'
-
-- task: ArchiveFiles@2
- inputs:
- rootFolderOrFile: build/
- includeRootFolder: false
- archiveType: 'zip'
- archiveFile: 'build.zip'
- replaceExistingArchive: true
diff --git a/source/conf.py b/source/conf.py
index a7f1b866..16d58a29 100644
--- a/source/conf.py
+++ b/source/conf.py
@@ -79,7 +79,6 @@ def setup(app):
"sidebar_hide_name": True,
"light_logo": "assets/PhotonVision-Header-onWhite.png",
"dark_logo": "assets/PhotonVision-Header-noBG.png",
- "announcement": "If you are new to PhotonVision, click here!.",
"light_css_variables": {
"font-stack": '-apple-system, BlinkMacSystemFont, avenir next, avenir, segoe ui, helvetica neue, helvetica, Ubuntu, roboto, noto, arial, sans-serif;',
diff --git a/source/docs/getting-started/best-practices.rst b/source/docs/additional-resources/best-practices.rst
similarity index 100%
rename from source/docs/getting-started/best-practices.rst
rename to source/docs/additional-resources/best-practices.rst
diff --git a/source/docs/hardware/config.rst b/source/docs/additional-resources/config.rst
similarity index 100%
rename from source/docs/hardware/config.rst
rename to source/docs/additional-resources/config.rst
diff --git a/source/docs/hardware/images/configDir.png b/source/docs/additional-resources/images/configDir.png
similarity index 100%
rename from source/docs/hardware/images/configDir.png
rename to source/docs/additional-resources/images/configDir.png
diff --git a/source/docs/programming/nt-api.rst b/source/docs/additional-resources/nt-api.rst
similarity index 100%
rename from source/docs/programming/nt-api.rst
rename to source/docs/additional-resources/nt-api.rst
diff --git a/source/docs/getting-started/pipeline-tuning/apriltag-tuning.rst b/source/docs/apriltag-pipelines/2D-tracking-tuning.rst
similarity index 70%
rename from source/docs/getting-started/pipeline-tuning/apriltag-tuning.rst
rename to source/docs/apriltag-pipelines/2D-tracking-tuning.rst
index 858ae989..5dcd0f74 100644
--- a/source/docs/getting-started/pipeline-tuning/apriltag-tuning.rst
+++ b/source/docs/apriltag-pipelines/2D-tracking-tuning.rst
@@ -1,5 +1,22 @@
-AprilTag Tuning
-===============
+2D AprilTag Tuning / Tracking
+=============================
+
+Tracking AprilTags
+------------------
+
+Before you get started tracking AprilTags, ensure that you have followed the previous sections on installation, wiring, and networking. Next, open the Web UI, go to the top right card, and switch to the "AprilTag" or "Aruco" type. You should see a screen similar to the one below.
+
+.. image:: images/apriltag.png
+ :align: center
+
+|
+
+You are now able to detect and track AprilTags in 2D (yaw, pitch, roll, etc.). In order to get 3D data from your AprilTags, please see :ref:`here. `
+
+Tuning AprilTags
+----------------
+
+AprilTag pipelines come with reasonable defaults to get you up and running with tracking. However, in order to optimize your performance and accuracy, you must tune your AprilTag pipeline using the settings below. Note that the settings differ between the AprilTag and Aruco detectors, but the concepts are the same.
.. image:: images/apriltag-tune.png
:scale: 45 %
@@ -8,38 +25,42 @@ AprilTag Tuning
|
Target Family
--------------
-Target families are defined by two numbers (before and after the h). The first number is the number of bits the tag is able to encode (which means more tags are available in the respective family) and the second is the hamming distance. Hamming distance describes the ability for error correction while identifying tag ids. A high hamming distance generally means that it will be easier for a tag to be identified even if there are errors. However, as hamming distance increases, the number of available tags decreases. The 2023 FRC game will be using 16h5 tags, which can be found `here `_. PhotonVision also supports the usage of 36h11 tags.
+^^^^^^^^^^^^^
+
+Target families are defined by two numbers (before and after the h). The first number is the number of bits the tag is able to encode (which means more tags are available in the respective family) and the second is the hamming distance. Hamming distance describes the ability for error correction while identifying tag ids. A high hamming distance generally means that it will be easier for a tag to be identified even if there are errors. However, as hamming distance increases, the number of available tags decreases. The 2024 FRC game will be using 36h11 tags, which can be found `here `_.
Decimate
---------
+^^^^^^^^
Decimation (also known as down-sampling) is the process of reducing the sampling frequency of a signal (in our case, the image). Increasing decimate will lead to an increased detection rate while decreasing detection distance. We recommend keeping this at the default value.
Blur
-----
+^^^^
This controls the sigma of Gaussian blur for tag detection. In clearer terms, increasing blur will make the image blurrier, while decreasing it will make it closer to the original image. We strongly recommend that you keep blur to a minimum (0) due to its high performance cost, unless you have an extremely noisy image.
Threads
--------
+^^^^^^^
Threads refers to the threads within your coprocessor's CPU. The theoretical maximum is device dependent, but we recommend that users stick to one less than the number of CPU threads that your coprocessor has. Increasing threads will increase performance at the cost of increased CPU load, temperature increase, etc. It may take some experimentation to find the most optimal value for your system.
Refine Edges
-------------
+^^^^^^^^^^^^
+
The edges of each polygon are adjusted to "snap to" strong color differences surrounding them. It is recommended to use this in tandem with decimate as it can increase the quality of the initial estimate.
Pose Iterations
----------------
+^^^^^^^^^^^^^^^
+
Pose iterations represents the number of iterations done in order for the AprilTag algorithm to converge on its pose solution(s). A smaller number between 0-100 is recommended. A smaller number of iterations causes a noisier set of poses when looking at the tag straight on, while higher values much more consistently stick to a (potentially wrong) pair of poses. WPILib contains many useful filter classes in order to account for a noisy tag reading.
Max Error Bits
---------------
+^^^^^^^^^^^^^^
+
Max error bits, also known as hamming distance, is the number of positions at which corresponding pieces of data / tag are different. Put more generally, this is the number of bits (think of these as squares in the tag) that need to be changed / corrected in the tag to correctly detect it. A higher value means that more tags will be detected while a lower value cuts out tags that could be "questionable" in terms of detection.
We recommend a value of 0 for the 16h5 and 7+ for the 36h11 family.
Decision Margin Cutoff
------------------------
+^^^^^^^^^^^^^^^^^^^^^^
The decision margin cutoff is how much “margin” the detector has left before it rejects a tag; increasing this rejects poorer tags. We recommend you keep this value around 30.
diff --git a/source/docs/apriltag-pipelines/3D-tracking.rst b/source/docs/apriltag-pipelines/3D-tracking.rst
new file mode 100644
index 00000000..8f1a99ee
--- /dev/null
+++ b/source/docs/apriltag-pipelines/3D-tracking.rst
@@ -0,0 +1,15 @@
+3D Tracking
+===========
+
+3D AprilTag tracking will allow you to track the real-world position and rotation of a tag relative to the camera's image sensor. This is useful for robot pose estimation and other applications like autonomous scoring. In order to use 3D tracking, you must first :ref:`calibrate your camera `. Once you have, enable 3D mode in the UI and you will be able to get 3D pose information from the tag. For information on getting and using this information in your code, see :ref:`the programming reference. `
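+
+As a rough illustration (assuming a PhotonLib ``PhotonCamera`` named ``camera``), the camera-to-tag transform reported in 3D mode can be read like this:
+
+.. code-block:: java
+
+   PhotonPipelineResult result = camera.getLatestResult();
+   if (result.hasTargets()) {
+       // Transform3d from the camera to the tag, expressed in the camera coordinate frame.
+       Transform3d cameraToTag = result.getBestTarget().getBestCameraToTarget();
+   }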
+
+Ambiguity
+---------
+
+Translating from 2D to 3D using data from the calibration and the four tag corners can lead to "pose ambiguity", where it appears that the AprilTag pose is flipping between two different poses. You can read more about this issue `here. ` Ambiguity is calculated as the ratio of reprojection errors between the two pose solutions (if they exist), where reprojection error is the image distance between where the AprilTag's corners are detected and where we expect to see them based on the tag's estimated camera-relative pose.
+
+There are a few steps you can take to resolve/mitigate this issue:
+
+1. Mount cameras at oblique angles so it is less likely that the tag will be seen straight on.
+2. Use the :ref:`MultiTag system ` in order to combine the corners from multiple tags to get a more accurate and unambiguous pose.
+3. Reject all tag poses where the ambiguity ratio (available via PhotonLib) is greater than 0.2, as in the sketch below.
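+
+A minimal sketch of the ambiguity filter in step 3, assuming a PhotonLib ``PhotonTrackedTarget`` named ``target``:
+
+.. code-block:: java
+
+   // getPoseAmbiguity() returns the ratio of reprojection errors between the two candidate
+   // poses (lower is better), or -1 if ambiguity could not be calculated.
+   double ambiguity = target.getPoseAmbiguity();
+   if (ambiguity >= 0 && ambiguity < 0.2) {
+       // Reasonably unambiguous; safe to use this target for pose estimation.
+       Transform3d cameraToTag = target.getBestCameraToTarget();
+   }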
diff --git a/source/docs/apriltag-pipelines/about-apriltags.rst b/source/docs/apriltag-pipelines/about-apriltags.rst
new file mode 100644
index 00000000..0f571f57
--- /dev/null
+++ b/source/docs/apriltag-pipelines/about-apriltags.rst
@@ -0,0 +1,13 @@
+About AprilTags
+===============
+
+.. image:: images/pv-apriltag.png
+ :align: center
+ :scale: 20 %
+
+AprilTags are a common type of visual fiducial marker. Visual fiducial markers are artificial landmarks added to a scene to allow "localization" (finding your current position) via images. In simpler terms, tags mark known points of reference that you can use to find your current location. They are similar to QR codes in that they encode information; however, they hold only a single number. By placing AprilTags in known locations around the field and detecting them using PhotonVision, you can easily get full field localization / pose estimation. Alternatively, you can use AprilTags the same way you used retroreflective tape, simply using them to turn to the goal without any pose estimation.
+
+A more technical explanation can be found in the `WPILib documentation `_.
+
+.. note:: You can get FIRST's `official PDF of the targets used in 2023 here `_.
+
diff --git a/source/docs/apriltag-pipelines/coordinate-systems.rst b/source/docs/apriltag-pipelines/coordinate-systems.rst
new file mode 100644
index 00000000..3316b98a
--- /dev/null
+++ b/source/docs/apriltag-pipelines/coordinate-systems.rst
@@ -0,0 +1,34 @@
+Coordinate Systems
+==================
+
+Field and Robot Coordinate Frame
+--------------------------------
+
+PhotonVision follows the WPILib conventions for the robot and field coordinate systems, as defined `here `_.
+
+You define the camera to robot transform in the robot coordinate frame.
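+
+For example, a camera mounted 30 cm forward of the robot's center, 20 cm up, and pitched 15 degrees upward could be described with a WPILib ``Transform3d`` like the sketch below (the numbers are placeholders for your own measurements; PhotonLib's ``PhotonPoseEstimator`` expects this robot-to-camera direction):
+
+.. code-block:: java
+
+   // Robot frame: +X forward, +Y left, +Z up. Distances in meters, rotations in radians.
+   Transform3d robotToCamera = new Transform3d(
+       new Translation3d(0.30, 0.0, 0.20),            // 30 cm forward, centered, 20 cm up
+       new Rotation3d(0.0, Math.toRadians(-15), 0.0)); // pitched 15 degrees upward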
+
+Camera Coordinate Frame
+-----------------------
+
+The camera coordinate system is defined as follows, relative to the camera sensor itself, and when looking in the same direction as the sensor points:
+
+* The origin is the center.
+* The x-axis points to the left.
+* The y-axis points up.
+* The z-axis points out toward the subject.
+
+AprilTag Coordinate Frame
+-------------------------
+
+The AprilTag coordinate system is defined as follows, relative to the center of the AprilTag itself, and when viewing the tag as a robot would:
+
+* The origin is the center.
+* The x-axis points to your right.
+* The y-axis points upwards.
+* The z-axis is normal to the plane the tag is printed on, pointing outward from the visible side of the tag.
+
+.. image:: images/apriltag-coords.png
+ :align: center
+ :scale: 50%
+ :alt: AprilTag Coordinate System
diff --git a/source/docs/apriltag-pipelines/detector-types.rst b/source/docs/apriltag-pipelines/detector-types.rst
new file mode 100644
index 00000000..c22d5d03
--- /dev/null
+++ b/source/docs/apriltag-pipelines/detector-types.rst
@@ -0,0 +1,15 @@
+AprilTag Pipeline Types
+=======================
+
+PhotonVision offers two different AprilTag pipeline types based on different implementations of the underlying algorithm. Each one has its advantages and disadvantages, which are detailed below.
+
+.. note:: Both of these pipeline types detect AprilTag markers; they are simply two different algorithms for doing so.
+
+AprilTag
+--------
+
+The AprilTag pipeline type is based on the `AprilTag `_ library from the University of Michigan and we recommend it for most use cases. It is (to our understanding) the most accurate pipeline type, but is also ~2x slower than AruCo. This was the pipeline type used by teams in the 2023 season and is well tested.
+
+AruCo
+-----
+The AruCo pipeline is based on the `AruCo `_ library implementation from OpenCV. It has ~2x higher FPS and ~2x lower latency than the AprilTag pipeline type, but is less accurate. We recommend this pipeline type for teams that need to run at a higher framerate or have a lower-powered device. This pipeline type is new for the 2024 season and is not as well tested as AprilTag.
diff --git a/source/docs/apriltag-pipelines/images/apriltag-coords.png b/source/docs/apriltag-pipelines/images/apriltag-coords.png
new file mode 100644
index 00000000..a70eecaa
Binary files /dev/null and b/source/docs/apriltag-pipelines/images/apriltag-coords.png differ
diff --git a/source/docs/getting-started/pipeline-tuning/images/apriltag-tune.png b/source/docs/apriltag-pipelines/images/apriltag-tune.png
similarity index 100%
rename from source/docs/getting-started/pipeline-tuning/images/apriltag-tune.png
rename to source/docs/apriltag-pipelines/images/apriltag-tune.png
diff --git a/source/docs/apriltag-pipelines/images/apriltag.png b/source/docs/apriltag-pipelines/images/apriltag.png
new file mode 100644
index 00000000..dceda348
Binary files /dev/null and b/source/docs/apriltag-pipelines/images/apriltag.png differ
diff --git a/source/docs/apriltag-pipelines/images/camera-coord.png b/source/docs/apriltag-pipelines/images/camera-coord.png
new file mode 100644
index 00000000..7bf3322e
Binary files /dev/null and b/source/docs/apriltag-pipelines/images/camera-coord.png differ
diff --git a/source/docs/getting-started/assets/apriltag.png b/source/docs/apriltag-pipelines/images/pv-apriltag.png
similarity index 100%
rename from source/docs/getting-started/assets/apriltag.png
rename to source/docs/apriltag-pipelines/images/pv-apriltag.png
diff --git a/source/docs/apriltag-pipelines/index.rst b/source/docs/apriltag-pipelines/index.rst
new file mode 100644
index 00000000..920b4cdb
--- /dev/null
+++ b/source/docs/apriltag-pipelines/index.rst
@@ -0,0 +1,11 @@
+AprilTag Detection
+==================
+
+.. toctree::
+
+ about-apriltags
+ detector-types
+ 2D-tracking-tuning
+ 3D-tracking
+ multitag
+ coordinate-systems
diff --git a/source/docs/apriltag-pipelines/multitag.rst b/source/docs/apriltag-pipelines/multitag.rst
new file mode 100644
index 00000000..d55ab0b0
--- /dev/null
+++ b/source/docs/apriltag-pipelines/multitag.rst
@@ -0,0 +1,4 @@
+MultiTag Localization
+=====================
+
+Coming soon!
diff --git a/source/docs/getting-started/assets/AprilTag16h5.pdf b/source/docs/assets/AprilTag16h5.pdf
similarity index 100%
rename from source/docs/getting-started/assets/AprilTag16h5.pdf
rename to source/docs/assets/AprilTag16h5.pdf
diff --git a/source/docs/hardware/images/settings.png b/source/docs/assets/settings.png
similarity index 100%
rename from source/docs/hardware/images/settings.png
rename to source/docs/assets/settings.png
diff --git a/source/docs/getting-started/pipeline-tuning/calibration.rst b/source/docs/calibration/calibration.rst
similarity index 88%
rename from source/docs/getting-started/pipeline-tuning/calibration.rst
rename to source/docs/calibration/calibration.rst
index adbeaffe..c34da24a 100644
--- a/source/docs/getting-started/pipeline-tuning/calibration.rst
+++ b/source/docs/calibration/calibration.rst
@@ -37,14 +37,14 @@ Accurate camera calibration is required in order to get accurate pose measuremen
Following the ideas above should help in getting an accurate calibration.
Calibration Steps
-=================
+-----------------
-Your camera can be calibrated using either the utility built into PhotonVision, which performs all the calculations on your coprocessor, or using a website such as `calibdb `, which uses a USB webcam connected to your laptop. The integrated calibration utility is currently the only one that works with ribbon-cable CSI cameras or Limelights, but for USB webcams, calibdb is the preferred option.
+Your camera can be calibrated using either the utility built into PhotonVision, which performs all the calculations on your coprocessor, or using a website such as `calibdb `_, which uses a USB webcam connected to your laptop. The integrated calibration utility is currently the only one that works with ribbon-cable CSI cameras or Limelights, but for USB webcams, calibdb is the preferred option.
Calibrating using calibdb
-------------------------
-Calibdb uses a modified chessboard/aruco marker combination target called `ChArUco targets `. The website currently only supports Chrome browser.
+Calibdb uses a modified chessboard/aruco marker combination target called `ChArUco targets. `_ The website currently only supports the Chrome browser.
Download and print out (or display on a monitor) the calibration by clicking Show Pattern. Click "Calibrate" and align your camera with the ghost overlay of the calibration board. The website automatically calculates the next position and displays it for you. When complete, download the calibration (do **not** use the OpenCV format). Reconnect your camera to your coprocessor and navigate to the PhotonVision web interface's camera tab. Ensure the correct camera is selected, and click the "Import from CalibDB" button. Your calibration data will be automatically saved and applied!
@@ -75,14 +75,14 @@ Now, we'll capture images of our chessboard from various angles. The most import
.. raw:: html
Accessing Calibration Images
----------------------------
-For advanced users, these calibrations can be later accessed by :ref:`exporting your config directory ` and viewing the camera's config.json file. Furthermore, the most recent snapshots will be saved to the calibImgs directory. The example images below are from `the calibdb website ` -- focus on how the target is oriented, as the same general tips for positioning apply for chessboard targets as for ChArUco.
+For advanced users, these calibrations can be later accessed by :ref:`exporting your config directory ` and viewing the camera's config.json file. Furthermore, the most recent snapshots will be saved to the calibImgs directory. The example images below are from `the calibdb website ` -- focus on how the target is oriented, as the same general tips for positioning apply for chessboard targets as for ChArUco.
.. image:: images/calibImgs.png
:width: 600
diff --git a/source/docs/getting-started/pipeline-tuning/images/calibImgs.png b/source/docs/calibration/images/calibImgs.png
similarity index 100%
rename from source/docs/getting-started/pipeline-tuning/images/calibImgs.png
rename to source/docs/calibration/images/calibImgs.png
diff --git a/source/docs/contributing/photonvision/build-instructions.rst b/source/docs/contributing/photonvision/build-instructions.rst
index 87ba8806..734b10ca 100644
--- a/source/docs/contributing/photonvision/build-instructions.rst
+++ b/source/docs/contributing/photonvision/build-instructions.rst
@@ -58,8 +58,8 @@ In the root directory:
``gradlew buildAndCopyUI``
-Build and Run the Source
-~~~~~~~~~~~~~~~~~~~~~~~~
+Build and Run PhotonVision
+~~~~~~~~~~~~~~~~~~~~~~~~~~
To compile and run the project, issue the following command in the root directory:
@@ -93,6 +93,32 @@ Running the following command under the root directory will build the jar under
``gradlew shadowJar``
+Build and Run PhotonVision on a Raspberry Pi Coprocessor
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As a convenience, the build includes a built-in ``deploy`` command which builds, deploys, and starts the current source code on a coprocessor.
+
+An architecture override is required to specify the deploy target's architecture.
+
+.. tab-set::
+
+ .. tab-item:: Linux
+
+ ``./gradlew clean``
+ ``./gradlew deploy -PArchOverride=linuxarm64``
+
+ .. tab-item:: macOS
+
+ ``./gradlew clean``
+ ``./gradlew deploy -PArchOverride=linuxarm64``
+
+ .. tab-item:: Windows (cmd)
+
+ ``gradlew clean``
+ ``gradlew deploy -PArchOverride=linuxarm64``
+
+The ``deploy`` command is tested against Raspberry Pi coprocessors. Other similar coprocessors may work too.
+
Using PhotonLib Builds
~~~~~~~~~~~~~~~~~~~~~~
@@ -139,10 +165,10 @@ After adding the generated vendordep to your project, add the following to your
}
-Debugging a local PhotonVision build
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Debugging PhotonVision Running Locally
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-One way is by running the program using gradle with the :code:`--debug-jvm` flag. Run the program with :code:`./gradlew run --debug-jvm`, and attach to it with VSCode by adding the following to launch.json. Note args can be passed with :code:`--args="foobar"`.
+One way is by running the program using gradle with the :code:`--debug-jvm` flag. Run the program with :code:`./gradlew run --debug-jvm`, and attach to it with VSCode by adding the following to :code:`launch.json`. Note args can be passed with :code:`--args="foobar"`.
.. code-block::
@@ -165,6 +191,39 @@ One way is by running the program using gradle with the :code:`--debug-jvm` flag
PhotonVision can also be run using the gradle tasks plugin with :code:`"args": "--debug-jvm"` added to launch.json.
+
+Debugging PhotonVision Running on a Coprocessor
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set up a VSCode configuration in :code:`launch.json`
+
+.. code-block::
+
+ {
+ // Use IntelliSense to learn about possible attributes.
+ // Hover to view descriptions of existing attributes.
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "type": "java",
+ "name": "Attach to CoProcessor",
+ "request": "attach",
+ "hostName": "photonvision.local",
+ "port": "5801",
+ "projectName": "photon-core"
+ },
+ ]
+ }
+
+Stop any existing instance of PhotonVision.
+
+Launch the program with the following additional argument to the JVM: :code:`java -jar -agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=*:5801 photonvision.jar`
+
+Once the program says it is listening on port 5801, launch the debug configuration in VSCode.
+
+The program will wait for the VSCode debugger to attach before proceeding.
+
Running examples
~~~~~~~~~~~~~~~~
diff --git a/source/docs/getting-started/description.rst b/source/docs/description.rst
similarity index 100%
rename from source/docs/getting-started/description.rst
rename to source/docs/description.rst
diff --git a/source/docs/examples/aimingatatarget.rst b/source/docs/examples/aimingatatarget.rst
index edb2d6c7..53c077f9 100644
--- a/source/docs/examples/aimingatatarget.rst
+++ b/source/docs/examples/aimingatatarget.rst
@@ -13,7 +13,7 @@ Knowledge and Equipment Needed
Code
-------
-Now that you have properly set up your vision system and have tuned a pipeline, you can now aim your robot/turret at the target using the data from PhotonVision. This data is reported over NetworkTables and includes: latency, whether there is a target detected or not, pitch, yaw, area, skew, and target pose relative to the robot. This data will be used/manipulated by our vendor dependency, PhotonLib. The documentation for the Network Tables API can be found :ref:`here ` and the documentation for PhotonLib :ref:`here `.
+Now that you have properly set up your vision system and tuned a pipeline, you can aim your robot/turret at the target using the data from PhotonVision. This data is reported over NetworkTables and includes: latency, whether there is a target detected or not, pitch, yaw, area, skew, and target pose relative to the robot. This data will be used/manipulated by our vendor dependency, PhotonLib. The documentation for the Network Tables API can be found :ref:`here ` and the documentation for PhotonLib :ref:`here `.
For this simple example, only yaw is needed.
diff --git a/source/docs/examples/apriltag.rst b/source/docs/examples/apriltag.rst
deleted file mode 100644
index 339a9ac3..00000000
--- a/source/docs/examples/apriltag.rst
+++ /dev/null
@@ -1,19 +0,0 @@
-Using AprilTags for Pose Estimation
-===================================
-
-The following example is from the PhotonLib example repository (`Java `_).
-
-
-Knowledge and Equipment Needed
-------------------------------
-
-- Everything required in :ref:`Aiming at a Target `.
-- Large space where your robot can move around freely
-- An open space with properly mounted 16h5 AprilTags
-- PhotonVision running on your laptop or a coprocessor
-
-This example will show you how to use AprilTags for full field robot localization using ``PhotonPoseEstimator``, ``AprilTagFieldLayout``, and the WPILib Pose Estimation Classes.
-
-All PhotonVision specific code is in ``PhotonCameraWrapper.java`` and the relevant pose estimation parts are in ``DriveTrain.java.``
-
-Please note that this code does not support simulation in the traditional sense (properly simulating each target that can be detected within sim), but you can still see the pose the camera is returning from the tags using Glass / Field2d when you are running PhotonVision on a robot. Make sure you properly set your ip/hostname in ``Robot.java`` when doing this.
diff --git a/source/docs/examples/index.rst b/source/docs/examples/index.rst
index 0f1745b2..b7407489 100644
--- a/source/docs/examples/index.rst
+++ b/source/docs/examples/index.rst
@@ -7,6 +7,5 @@ Code Examples
aimingatatarget
gettinginrangeofthetarget
aimandrange
- apriltag
simaimandrange
simposeest
diff --git a/source/docs/getting-started/april-tags.rst b/source/docs/getting-started/april-tags.rst
deleted file mode 100644
index f5586f6b..00000000
--- a/source/docs/getting-started/april-tags.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-AprilTags
-=========
-
-.. image:: assets/apriltag.png
- :align: center
- :scale: 20 %
-
-.. important:: For the 2023 FRC Game, FIRST HQ has announced that visual fiducial markers (AprilTags) will be used on the field in addition to retroreflective tape. More information can be found in the `blog post here `_. Get ahead of the game by setting up PhotonVision and start detecting AprilTags in the offseason so you're ready for whatever the 2023 game has to offer!
-
-About AprilTags
-^^^^^^^^^^^^^^^
-
-AprilTags are a type of visual fiducial marker that is commonly used within robotics and computer vision applications. Visual fiducial markers are artificial landmarks added to a scene to allow "localization" (finding your current position) via images. In simpler terms, it is something that can act as a known point of reference that you can use to find your current location. They are similar to QR codes in which they encode information, however, they hold much less data. This has the added benefit of being much easier to track from long distances and at low resolutions. By placing AprilTags in known locations around the field and detecting them using PhotonVision, you can easily get full field localization / pose estimation. Alternatively, you can use AprilTags the same way you used retroreflective tape, simply using them to turn to goal without any pose estimation.
-
-A more technical explanation can be found in the `WPILib documentation `_.
-
-.. note:: You can get FIRST's `official PDF of the targets used in 2023 here `_.
-
-Getting Started With AprilTags
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-1. Install PhotonVision, wire your coprocessor, and get the dashboard up.
-
-.. note:: When selecting the image during installation, ensure you use one from the 2022 beta or 2023 stable release.
-
-2. Read the documentation in the :ref:`pipeline tuning section` about how to tune a pipeline for AprilTags.
-
-3. Read page on :ref:`Robot Integration Strategies with AprilTags` on different approaches to using the data you get from AprilTags. This includes simply turning to the goal, getting the pose of the target, all the way to real-time, latency compensated pose estimation.
-
-4. Read the :ref:`PhotonLib documentation` on how to use AprilTag data in your code.
-
-5. Read the :ref:`example code` on a fully featured example on different ways to use AprilTags.
diff --git a/source/docs/getting-started/index.rst b/source/docs/getting-started/index.rst
deleted file mode 100644
index 48566921..00000000
--- a/source/docs/getting-started/index.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-:orphan:
-
-Getting Started
-===============
-
-.. toctree::
- :maxdepth: 2
-
- installation/index
-
-.. toctree::
- :maxdepth: 2
-
- pipeline-tuning/index
diff --git a/source/docs/getting-started/pipeline-tuning/index.rst b/source/docs/getting-started/pipeline-tuning/index.rst
deleted file mode 100644
index 041b62df..00000000
--- a/source/docs/getting-started/pipeline-tuning/index.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-Pipeline Tuning
-===============
-
-.. toctree::
- :maxdepth: 0
- :titlesonly:
-
- about-pipelines
- input
- reflectiveAndShape/index
- apriltag-tuning
- output
- calibration
-
diff --git a/source/docs/getting-started/pipeline-tuning/images/motionblur.gif b/source/docs/hardware/images/motionblur.gif
similarity index 100%
rename from source/docs/getting-started/pipeline-tuning/images/motionblur.gif
rename to source/docs/hardware/images/motionblur.gif
diff --git a/source/docs/hardware/index.rst b/source/docs/hardware/index.rst
index a4cb5e79..1e4a2f3b 100644
--- a/source/docs/hardware/index.rst
+++ b/source/docs/hardware/index.rst
@@ -1,11 +1,9 @@
-Hardware
-========
+Hardware Selection
+==================
.. toctree::
:maxdepth: 2
- supportedhardware
+ selecting-hardware
picamconfig
customhardware
- config
- Settings
diff --git a/source/docs/hardware/picamconfig.rst b/source/docs/hardware/picamconfig.rst
index 68c1d51f..4e053a8c 100644
--- a/source/docs/hardware/picamconfig.rst
+++ b/source/docs/hardware/picamconfig.rst
@@ -52,4 +52,4 @@ Save the file, close the editor, and eject the drive. The boot configuration sho
Additional Information
----------------------
-See `the libcamera documentation `_ for more details on configuring cameras.
+See `the libcamera documentation `_ for more details on configuring cameras.
diff --git a/source/docs/hardware/selecting-hardware.rst b/source/docs/hardware/selecting-hardware.rst
new file mode 100644
index 00000000..440e8128
--- /dev/null
+++ b/source/docs/hardware/selecting-hardware.rst
@@ -0,0 +1,101 @@
+Selecting Hardware
+==================
+
+In order to use PhotonVision, you need a coprocessor and a camera. This page will help you select the right hardware for your team depending on your budget, needs, and experience.
+
+Choosing a Coprocessor
+----------------------
+
+Minimum System Requirements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Ubuntu 22.04 LTS or Windows 10/11
+ * We don't recommend using Windows for anything except testing out the system on a local machine.
+* CPU: ARM Cortex-A53 (the CPU on Raspberry Pi 3) or better
+* At least 8GB of storage
+* 2GB of RAM
+ * PhotonVision isn't very RAM intensive, but you'll need at least 2GB to run the OS and PhotonVision.
+* The following IO:
+ * At least 1 USB or MIPI-CSI port for the camera
+ * Note that we only support using the Raspberry Pi's MIPI-CSI port; MIPI-CSI ports on other coprocessors may not work.
+ * Ethernet port for networking
+
+Coprocessor Recommendations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When selecting a coprocessor, it is important to consider various factors, particularly when it comes to AprilTag detection. Opting for a coprocessor with a more powerful CPU can generally result in higher FPS AprilTag detection, leading to more accurate pose estimation. However, it is important to note that there is a point of diminishing returns, where the benefits of a more powerful CPU may not outweigh the additional cost. Below is a list of supported hardware, along with some notes on each.
+
+* Orange Pi 5 ($99)
+ * This is the recommended coprocessor for most teams. It has a powerful CPU that can handle AprilTag detection at high FPS, and is relatively cheap compared to processors of a similar power.
+* Raspberry Pi 4/5 ($55-$80)
+ * This is the recommended coprocessor for teams on a budget. It has a less powerful CPU than the Orange Pi 5, but is still capable of running PhotonVision at a reasonable FPS.
+* Mini PCs (such as Beelink N5095)
+ * This coprocessor will likely have similar performance to the Orange Pi 5 but has a higher performance ceiling (when using more powerful CPUs). Do note that this would require extra effort to wire to the robot / get set up. More information can be found in the set up guide `here. `_
+* Other coprocessors can be used but may require some extra work / command line usage in order to get them working properly.
+
+Choosing a Camera
+-----------------
+
+PhotonVision works with Pi Cameras and most USB cameras; the recommendations below are known to work and have been tested. Other cameras such as webcams, virtual cameras, etc. are not officially supported and may not work. It is important to note that fisheye cameras should only be used as a driver camera and not for detecting targets.
+
+PhotonVision relies on `CSCore `_ to detect and process cameras, so camera support is determined based on compatibility with CSCore along with native support for the camera within your OS (ex. `V4L compatibility `_ if using a Linux machine like a Raspberry Pi).
+
+.. note::
+ Logitech Cameras and integrated laptop cameras will not work with PhotonVision due to oddities with their drivers. We recommend using a different camera.
+
+.. note::
+ We do not currently support the usage of two of the same camera on the same coprocessor. You can only use two or more cameras if they are of different models or they are from Arducam, which has a `tool that allows for cameras to be renamed `_.
+
+Recommended Cameras
+^^^^^^^^^^^^^^^^^^^
+For colored shape detection, any non-fisheye camera supported by PhotonVision will work. We recommend the Pi Camera V1 or a high-FPS USB camera.
+
+For a driver camera, we recommend a USB camera with a fisheye lens, so your driver can see more of the field.
+
+For AprilTag detection, we recommend you use a global shutter camera that has a ~100 degree diagonal FOV. This will allow you to see more AprilTags in frame, and will allow for more accurate pose estimation. You also want a camera that supports high FPS, as this will allow you to update your pose estimator at a higher frequency.
+
+* Recommendations for AprilTag Detection
+ * Arducam USB OV9281
+ * This is the recommended camera for AprilTag detection as it is a high-FPS, global shutter USB camera with a ~70 degree FOV.
+ * Innomaker OV9281
+ * Spinel AR0144
+ * Pi Camera Module V1
+ * The V1 is strongly preferred over the V2 due to the V2 having undesirable FOV choices
+
+AprilTags and Motion Blur
+^^^^^^^^^^^^^^^^^^^^^^^^^
+When detecting AprilTags, you want to reduce the "motion blur" as much as possible. Motion blur is the visual streaking/smearing on the camera stream as a result of movement of the camera or object of focus. You want to mitigate this as much as possible because your robot is constantly moving and you want to be able to read as many tags as you possibly can. The possible solutions to this include:
+
+1. Cranking your exposure as low as it goes and increasing your gain/brightness. This will decrease the effects of motion blur and increase FPS.
+2. Using a global shutter (as opposed to rolling shutter) camera. This should eliminate most, if not all motion blur.
+3. Only rely on tags when not moving.
+
+.. image:: images/motionblur.gif
+ :align: center
+
+Using Multiple Cameras
+^^^^^^^^^^^^^^^^^^^^^^
+
+Using multiple cameras on your robot will help you detect more AprilTags at once and improve your pose estimation as a result. In order to use multiple cameras, you will need to create multiple PhotonPoseEstimators and add all of their measurements to a single drivetrain pose estimator. Please note that the accuracy of your robot to camera transform is especially important when using multiple cameras as any error in the transform will cause your pose estimations to "fight" each other. For more information, see :ref:`the programming reference. `.
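+
+A minimal sketch of this pattern (the field layout, camera names, transforms, and the drivetrain pose estimator are placeholders for your own robot code):
+
+.. code-block:: java
+
+   PhotonPoseEstimator frontEstimator = new PhotonPoseEstimator(
+       fieldLayout, PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR, frontCamera, robotToFrontCamera);
+   PhotonPoseEstimator rearEstimator = new PhotonPoseEstimator(
+       fieldLayout, PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR, rearCamera, robotToRearCamera);
+
+   // Called periodically: feed every camera's estimate into the single drivetrain pose estimator.
+   for (PhotonPoseEstimator estimator : List.of(frontEstimator, rearEstimator)) {
+       estimator.update().ifPresent(estimate ->
+           drivePoseEstimator.addVisionMeasurement(
+               estimate.estimatedPose.toPose2d(), estimate.timestampSeconds));
+   }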
+
+
+Performance Matrix
+------------------
+
+.. raw:: html
+
+
+
+Please submit performance data to be added to the matrix here:
+
+.. raw:: html
+
+
diff --git a/source/docs/hardware/supportedhardware.rst b/source/docs/hardware/supportedhardware.rst
deleted file mode 100644
index d21e7cb2..00000000
--- a/source/docs/hardware/supportedhardware.rst
+++ /dev/null
@@ -1,101 +0,0 @@
-Supported Hardware
-==================
-
-PhotonVision is developed and tested on a number of Commercial, Off-the-Shelf (COTS) vision processing hardware solutions.
-
-Supported Cameras
------------------
-
-PhotonVision works with Pi Cameras and most USB Cameras, the recommendations below are known to be working and have been tested. Other cameras such as webcams, virtual cameras, etc. are not officially supported and may not work. It is important to note that fisheye cameras should only be used as a driver camera and not for detecting targets.
-
-PhotonVision relies on `CSCore `_ to detect and process cameras, so camera support is determined based off compatibility with CScore along with native support for the camera within your OS (ex. `V4L compatibility `_ if using a Linux machine like a Raspberry Pi).
-
-Pi cameras are always recommended over USB cameras as they have lower latency and better performance compared to your average USB Camera.
-
-* `Pi Camera Module V1 `_ (General Target Tracking)
-
- * The V1 is strongly preferred over the V2 due to the V2 having undesirable FOV choices
-
-* `Innomaker OV9281 Global Shutter Camera `_ (AprilTag Tracking)
-
-.. note:: Note that there are many CSI based OV9281 cameras but this is the only one that has been tested by the development team.
-
-* `Arducam USB OV9281 Global Shutter Camera `_ (AprilTag Tracking)
-
-* `720p ELP Camera `_ (Retroreflective Target Tracking)
-
-* `Microsoft LifeCam HD-3000 `_ (Driver Camera)
-
-* `720p Fisheye ELP Camera `_ (Driver Camera)
-
-.. note:: If you test a camera and find that it works with PhotonVision, we encourage you to submit that camera to the performance matrix below.
-
-.. warning::
-
- The following cameras / setups are known to not work:
-
- * Using two of the same USB cameras does not currently work because it is hard to identify the two different cameras.
-
- * Most Logitech cameras (specifically the Logitech C270 HD Webcam (PN: 960-000694)) will not work with PhotonVision.
-
- * The PS3Eye needs a workaround to be usable, for more information see :ref:`our Known Issues page `
-
- * Most laptop integrated webcams
-
-Supported Coprocessors
-----------------------
-* Raspberry Pi 3 / 4, with the newest variants of each being preferred (3B+ and B, respectively).
-* Raspberry Pi 4 is preferred for all forms of target tracking.
-* Orange Pi 4 / 5 will have better performance but will require more work to get working.
-* Mini PCs (such as Beelink N5095) have been testing and show significantly better performance than a Raspberry Pi, but require extra effort to wire to the robot / get set up. More information can be found in the set up guide `here. `_
-* Other coprocessors can be used but may require some extra work / command line usage in order to get it working properly.
-
-Performance Matrix
-------------------
-
-.. raw:: html
-
-
-
-Please submit performance data to be added to the matrix here:
-
-.. raw:: html
-
-
-
-
-Support Levels
---------------
-.. list-table::
- :widths: 15 30 45
- :header-rows: 1
-
- * - Support Level
- - Support Qualities
- - Hardware
- * - Fully Supported
- - * Full discord help
- * All features will work
- * Everything will be kept up to date
- - * Gloworm
- * Raspberry Pi 3 and Raspberry Pi 4 with the official Pi image with the Pi Cam or CSCore compatible USB Cameras
- * - Compatible
- - * No guarantee of support on Discord
- * Major features will work
- * We hope to keep things up to date
- - * Linux (aarch64, armv7, x86_64)
- * Windows (x86_64)
- * - Unsupported
- - * Told to use something else
- * Won't try to make these work/update them
- * No guarantees
- - * macOS
- * Anything not listed above
diff --git a/source/docs/installation/images/networking-diagram.png b/source/docs/installation/images/networking-diagram.png
new file mode 100644
index 00000000..dcc57fe0
Binary files /dev/null and b/source/docs/installation/images/networking-diagram.png differ
diff --git a/source/docs/getting-started/installation/static.png b/source/docs/installation/images/static.png
similarity index 100%
rename from source/docs/getting-started/installation/static.png
rename to source/docs/installation/images/static.png
diff --git a/source/docs/getting-started/installation/index.rst b/source/docs/installation/index.rst
similarity index 96%
rename from source/docs/getting-started/installation/index.rst
rename to source/docs/installation/index.rst
index e1012660..9f835402 100644
--- a/source/docs/getting-started/installation/index.rst
+++ b/source/docs/installation/index.rst
@@ -1,5 +1,5 @@
-Getting Started
-===============
+Installation & Setup
+====================
This page will help you install PhotonVision on your coprocessor, wire it, and properly set up the networking in order to start tracking targets.
diff --git a/source/docs/getting-started/installation/networking.rst b/source/docs/installation/networking.rst
similarity index 88%
rename from source/docs/getting-started/installation/networking.rst
rename to source/docs/installation/networking.rst
index 30ab39dc..1d2ec305 100644
--- a/source/docs/getting-started/installation/networking.rst
+++ b/source/docs/installation/networking.rst
@@ -7,9 +7,11 @@ Physical Networking
After imaging your coprocessor, run an ethernet cable from your coprocessor to a router/radio and power on your coprocessor by plugging it into the wall. Then connect whatever device you're using to view the webdashboard to the same network and navigate to photonvision.local:5800.
-PhotonVision *STRONGLY* recommends the usage of a network switch on your robot. This is because the second radio port on the current FRC radios is known to be buggy and cause frequent connection issues that are detrimental during competition. More information can be found in this `ChiefDelphi thread `_ and an in-depth guide on how to install a network switch can be found `on FRC 900's website `_.
+PhotonVision *STRONGLY* recommends the usage of a network switch on your robot. This is because the second radio port on the current FRC radios is known to be buggy and cause frequent connection issues that are detrimental during competition. An in-depth guide on how to install a network switch can be found `on FRC 900's website `_.
+.. image:: images/networking-diagram.png
+ :alt: Robot networking diagram
Digital Networking
------------------
@@ -29,7 +31,7 @@ PhotonVision *STRONGLY* recommends the usage of Static IPs as it increases relia
Power-cycle your robot and you will now be able to access the PhotonVision dashboard at ``10.TE.AM.11:5800``.
-.. image:: static.png
+.. image:: images/static.png
:alt: Correctly set static IP
Port Forwarding
diff --git a/source/docs/getting-started/installation/sw_install/advanced-cmd.rst b/source/docs/installation/sw_install/advanced-cmd.rst
similarity index 100%
rename from source/docs/getting-started/installation/sw_install/advanced-cmd.rst
rename to source/docs/installation/sw_install/advanced-cmd.rst
diff --git a/source/docs/getting-started/installation/sw_install/files/Limelight2+/hardwareConfig.json b/source/docs/installation/sw_install/files/Limelight2+/hardwareConfig.json
similarity index 100%
rename from source/docs/getting-started/installation/sw_install/files/Limelight2+/hardwareConfig.json
rename to source/docs/installation/sw_install/files/Limelight2+/hardwareConfig.json
diff --git a/source/docs/getting-started/installation/sw_install/files/Limelight2/hardwareConfig.json b/source/docs/installation/sw_install/files/Limelight2/hardwareConfig.json
similarity index 100%
rename from source/docs/getting-started/installation/sw_install/files/Limelight2/hardwareConfig.json
rename to source/docs/installation/sw_install/files/Limelight2/hardwareConfig.json
diff --git a/source/docs/getting-started/installation/sw_install/gloworm.rst b/source/docs/installation/sw_install/gloworm.rst
similarity index 98%
rename from source/docs/getting-started/installation/sw_install/gloworm.rst
rename to source/docs/installation/sw_install/gloworm.rst
index 56891602..84efd42f 100644
--- a/source/docs/getting-started/installation/sw_install/gloworm.rst
+++ b/source/docs/installation/sw_install/gloworm.rst
@@ -34,7 +34,7 @@ Download and run `Angry IP Scanner `_ to
.. image:: images/angryIP.png
-Once you find it, set the IP to a desired :ref:`static IP in PhotonVision. `
+Once you find it, set the IP to a desired :ref:`static IP in PhotonVision. `
Updating PhotonVision
---------------------
diff --git a/source/docs/getting-started/installation/sw_install/images/angryIP.png b/source/docs/installation/sw_install/images/angryIP.png
similarity index 100%
rename from source/docs/getting-started/installation/sw_install/images/angryIP.png
rename to source/docs/installation/sw_install/images/angryIP.png
diff --git a/source/docs/getting-started/installation/sw_install/images/nano.png b/source/docs/installation/sw_install/images/nano.png
similarity index 100%
rename from source/docs/getting-started/installation/sw_install/images/nano.png
rename to source/docs/installation/sw_install/images/nano.png
diff --git a/source/docs/getting-started/installation/sw_install/index.rst b/source/docs/installation/sw_install/index.rst
similarity index 100%
rename from source/docs/getting-started/installation/sw_install/index.rst
rename to source/docs/installation/sw_install/index.rst
diff --git a/source/docs/getting-started/installation/sw_install/limelight.rst b/source/docs/installation/sw_install/limelight.rst
similarity index 71%
rename from source/docs/getting-started/installation/sw_install/limelight.rst
rename to source/docs/installation/sw_install/limelight.rst
index 5bc68f5e..065a410c 100644
--- a/source/docs/getting-started/installation/sw_install/limelight.rst
+++ b/source/docs/installation/sw_install/limelight.rst
@@ -8,7 +8,7 @@ Limelight imaging is a very similar process to Gloworm, but with extra steps.
Base Install Steps
^^^^^^^^^^^^^^^^^^
-Due to the similarities in hardware, follow the :ref:`Gloworm install instructions `.
+Due to the similarities in hardware, follow the :ref:`Gloworm install instructions `.
Hardware-Specific Steps
@@ -18,7 +18,7 @@ Download the hardwareConfig.json file for the version of your Limelight:
- :download:`Limelight Version 2 `.
- :download:`Limelight Version 2+ `.
-:ref:`Import the hardwareConfig.json file `. Again, this is **REQUIRED** or target measurements will be incorrect, and LEDs will not work.
+:ref:`Import the hardwareConfig.json file `. Again, this is **REQUIRED** or target measurements will be incorrect, and LEDs will not work.
After installation you should be able to `locate the camera `_ at: ``http://photonvision.local:5800/`` (not ``gloworm.local``, as previously)
diff --git a/source/docs/getting-started/installation/sw_install/linux-pc.rst b/source/docs/installation/sw_install/linux-pc.rst
similarity index 100%
rename from source/docs/getting-started/installation/sw_install/linux-pc.rst
rename to source/docs/installation/sw_install/linux-pc.rst
diff --git a/source/docs/getting-started/installation/sw_install/mac-os.rst b/source/docs/installation/sw_install/mac-os.rst
similarity index 100%
rename from source/docs/getting-started/installation/sw_install/mac-os.rst
rename to source/docs/installation/sw_install/mac-os.rst
diff --git a/source/docs/getting-started/installation/sw_install/orange-pi.rst b/source/docs/installation/sw_install/orange-pi.rst
similarity index 93%
rename from source/docs/getting-started/installation/sw_install/orange-pi.rst
rename to source/docs/installation/sw_install/orange-pi.rst
index 61d854a3..f66511cf 100644
--- a/source/docs/getting-started/installation/sw_install/orange-pi.rst
+++ b/source/docs/installation/sw_install/orange-pi.rst
@@ -30,4 +30,4 @@ Plug your Orange Pi into a display via HDMI and plug in a keyboard via USB once
Installing PhotonVision
-----------------------
-From here, you can follow :ref:`this guide `.
+From here, you can follow :ref:`this guide `.
diff --git a/source/docs/getting-started/installation/sw_install/other-coprocessors.rst b/source/docs/installation/sw_install/other-coprocessors.rst
similarity index 93%
rename from source/docs/getting-started/installation/sw_install/other-coprocessors.rst
rename to source/docs/installation/sw_install/other-coprocessors.rst
index 2662ad08..27125343 100644
--- a/source/docs/getting-started/installation/sw_install/other-coprocessors.rst
+++ b/source/docs/installation/sw_install/other-coprocessors.rst
@@ -19,7 +19,7 @@ We provide an `install script `_ for other Debian-based sy
.. note:: Your co-processor will require an Internet connection for this process to work correctly.
-For installation on any other co-processors, we recommend reading the :ref:`advanced command line documentation `.
+For installation on any other co-processors, we recommend reading the :ref:`advanced command line documentation `.
Updating PhotonVision
---------------------
diff --git a/source/docs/getting-started/installation/sw_install/raspberry-pi.rst b/source/docs/installation/sw_install/raspberry-pi.rst
similarity index 98%
rename from source/docs/getting-started/installation/sw_install/raspberry-pi.rst
rename to source/docs/installation/sw_install/raspberry-pi.rst
index 6d77272d..034ff33d 100644
--- a/source/docs/getting-started/installation/sw_install/raspberry-pi.rst
+++ b/source/docs/installation/sw_install/raspberry-pi.rst
@@ -34,7 +34,7 @@ Download and run `Angry IP Scanner `_ to
.. image:: images/angryIP.png
-Once you find it, set the IP to a desired :ref:`static IP in PhotonVision. `
+Once you find it, set the IP to a desired :ref:`static IP in PhotonVision. `
Updating PhotonVision
---------------------
diff --git a/source/docs/getting-started/installation/sw_install/romi.rst b/source/docs/installation/sw_install/romi.rst
similarity index 83%
rename from source/docs/getting-started/installation/sw_install/romi.rst
rename to source/docs/installation/sw_install/romi.rst
index a7963b14..74eefb6b 100644
--- a/source/docs/getting-started/installation/sw_install/romi.rst
+++ b/source/docs/installation/sw_install/romi.rst
@@ -10,7 +10,7 @@ The WPILibPi image includes FRCVision, which reserves USB cameras; to use Photon
SSH into the Raspberry Pi (using a tool like `Putty `_ ) at the Romi's default address ``10.0.0.2``. The default user is ``pi``, and the password is ``raspberry``.
-Follow the process for installing PhotonVision on :ref:`"Other Debian-Based Co-Processor Installation" `. As it mentions this will require an internet connection so plugging into the ethernet jack on the Raspberry Pi will be the easiest solution. The pi must remain writable!
+Follow the process for installing PhotonVision on :ref:`"Other Debian-Based Co-Processor Installation" `. As it mentions this will require an internet connection so plugging into the ethernet jack on the Raspberry Pi will be the easiest solution. The pi must remain writable!
Next, from the SSH terminal, run ``sudo nano /home/pi/runCamera`` then arrow down to the start of the exec line and press "Enter" to add a new line. Then add ``#`` before the exec command to comment it out. Then, arrow up to the new line and type ``sleep 10000``. Hit "Ctrl + O" and then "Enter" to save the file. Finally press "Ctrl + X" to exit nano. Now, reboot the Romi by typing ``sudo reboot``.
diff --git a/source/docs/getting-started/installation/sw_install/snakeyes.rst b/source/docs/installation/sw_install/snakeyes.rst
similarity index 98%
rename from source/docs/getting-started/installation/sw_install/snakeyes.rst
rename to source/docs/installation/sw_install/snakeyes.rst
index 80c13024..fde6b7f7 100644
--- a/source/docs/getting-started/installation/sw_install/snakeyes.rst
+++ b/source/docs/installation/sw_install/snakeyes.rst
@@ -30,7 +30,7 @@ Download and run `Angry IP Scanner `_ to
.. image:: images/angryIP.png
-Once you find it, set the IP to a desired :ref:`static IP in PhotonVision. `
+Once you find it, set the IP to a desired :ref:`static IP in PhotonVision. `
Updating PhotonVision
----------------------
diff --git a/source/docs/getting-started/installation/sw_install/windows-pc.rst b/source/docs/installation/sw_install/windows-pc.rst
similarity index 100%
rename from source/docs/getting-started/installation/sw_install/windows-pc.rst
rename to source/docs/installation/sw_install/windows-pc.rst
diff --git a/source/docs/getting-started/installation/wiring.rst b/source/docs/installation/wiring.rst
similarity index 83%
rename from source/docs/getting-started/installation/wiring.rst
rename to source/docs/installation/wiring.rst
index 753c84c5..9a0f9bfa 100644
--- a/source/docs/getting-started/installation/wiring.rst
+++ b/source/docs/installation/wiring.rst
@@ -28,9 +28,9 @@ Coprocessor without Passive POE
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1a. Option 1: Get a micro USB (may be USB-C if using a newer Pi) to USB-A cable and plug the USB A side into a regulator like `this `_. Then, wire the regulator into your PDP/PDB and the Micro USB / USB C into your coprocessor.
-1b. Option 2: Use a USB power bank to power your coprocessor. There are rules that regulate the usage of power banks so ensure that you aren't breaking them, more information can be found `here `_.
+1b. Option 2: Use a USB power bank to power your coprocessor. Refer to this year's robot rulebook for legal implementations of this.
-2. Run an ethernet cable from your Pi to your network switch / radio (we *STRONGLY* recommend the usage of a network switch, see the networking section for more info.)
+2. Run an ethernet cable from your Pi to your network switch / radio (we *STRONGLY* recommend the usage of a network switch, see the networking section for more info.)
------------------------------------------------------------
diff --git a/source/docs/integration/aprilTagStrategies.rst b/source/docs/integration/aprilTagStrategies.rst
index 8af97397..420baf4c 100644
--- a/source/docs/integration/aprilTagStrategies.rst
+++ b/source/docs/integration/aprilTagStrategies.rst
@@ -31,7 +31,7 @@ Global Pose Estimation / Pose Estimation Strategies
.. note:: See the previous page for more general information. Most of the information is the same except now the camera is supplying a ``Pose3D``.
-The nature of how AprilTags will be laid out makes it very likely that you will get multiple pose measurements within a single frame from seeing multiple targets. This requires strategies to fuse these observations together and get a "best guess" as to where your robot is. These strategies could include:
+Because of how AprilTags are laid out on the field, it is very likely that you will see multiple targets, and therefore get multiple pose measurements, within a single frame. This requires strategies to fuse these observations into a single "best guess" of where your robot is. The best way to do this is to use the corners from all visible AprilTags to estimate the robot's pose. This is done by using the ``PhotonPoseEstimator`` class and the "MULTI_TAG_PNP_ON_COPROCESSOR" strategy. Additional strategies include:
* A camera seeing multiple targets, taking the average of all the returned poses
* A camera seeing one target, with an assumed height off the ground, picking the pose which places it to the assumed height
@@ -41,4 +41,8 @@ The nature of how AprilTags will be laid out makes it very likely that you will
PhotonVision supports all of these different strategies via our ``PhotonPoseEstimator`` class that allows you to select one of the strategies above and get the relevant pose estimation.
-All of these strategies are valid approaches, and we recommend doing independent testing in order to see which one works best for your team / current game.
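+
+As a rough sketch of the recommended multi-tag setup (the camera name and the ``robotToCamera`` ``Transform3d`` below are illustrative placeholders), the estimator can be constructed like this:
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // Load the field layout of the tags we expect to see.
+ AprilTagFieldLayout fieldLayout = AprilTagFieldLayout.loadFromResource(AprilTagFields.k2023ChargedUp.m_resourceFile);
+
+ // The camera publishing target data (robotToCamera is its Transform3d relative to the robot).
+ PhotonCamera camera = new PhotonCamera("YOUR_CAMERA_NAME");
+
+ // Fuse the corners of every visible tag into one robot pose estimate on the coprocessor.
+ PhotonPoseEstimator photonPoseEstimator = new PhotonPoseEstimator(
+ fieldLayout, PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR, camera, robotToCamera);
+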
+Tuning Pose Estimators
+^^^^^^^^^^^^^^^^^^^^^^
+
+Coming soon!
+TODO: Add this back in once simposeest example is added.
diff --git a/source/docs/integration/simpleStrategies.rst b/source/docs/integration/simpleStrategies.rst
index 81527c4e..35005204 100644
--- a/source/docs/integration/simpleStrategies.rst
+++ b/source/docs/integration/simpleStrategies.rst
@@ -26,7 +26,7 @@ Range Alignment
By looking at the position of the target in the "vertical" direction in the image, and applying some trigonometry, the distance between the camera and the target can be deduced.
-1. Read the current distance to the target from the vision Coprocessor.
+1. Read the current distance to the target from the vision coprocessor.
2. If too far in one direction, command the drivetrain to travel in the opposite direction to compensate.
See the :ref:`Getting in Range of the Target ` example for more information.
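+
+A minimal sketch of this loop is shown below (``CAMERA_HEIGHT_METERS``, ``TARGET_HEIGHT_METERS``, ``CAMERA_PITCH_RADIANS``, ``GOAL_RANGE_METERS``, and the ``forwardController`` PID controller are illustrative placeholders):
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ var result = camera.getLatestResult();
+ if (result.hasTargets()) {
+ // Estimate range from the fixed camera/target geometry and the target's pitch in the image.
+ double rangeMeters = PhotonUtils.calculateDistanceToTargetMeters(
+ CAMERA_HEIGHT_METERS, TARGET_HEIGHT_METERS, CAMERA_PITCH_RADIANS,
+ Units.degreesToRadians(result.getBestTarget().getPitch()));
+ // Drive forward or backward until the measured range matches the goal range.
+ double forwardSpeed = -forwardController.calculate(rangeMeters, GOAL_RANGE_METERS);
+ drive.arcadeDrive(forwardSpeed, 0.0);
+ }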
diff --git a/source/docs/getting-started/pipeline-tuning/about-pipelines.rst b/source/docs/pipelines/about-pipelines.rst
similarity index 94%
rename from source/docs/getting-started/pipeline-tuning/about-pipelines.rst
rename to source/docs/pipelines/about-pipelines.rst
index c1cdd620..560deff3 100644
--- a/source/docs/getting-started/pipeline-tuning/about-pipelines.rst
+++ b/source/docs/pipelines/about-pipelines.rst
@@ -1,3 +1,5 @@
+:orphan:
+
About Pipelines
===============
@@ -19,12 +21,12 @@ Colored Shape
This pipeline type is based on detecting different shapes like circles, triangles, quadrilaterals, or a polygon. An example usage would be detecting yellow PowerCells from the 2020 FRC game. You can read more about the specific settings available in the contours page.
-AprilTag
---------
+AprilTag / ArUco
+----------------
This pipeline type is based on detecting AprilTag fiducial markers. More information about AprilTags can be found in the WPILib documentation. While being more performance intensive than the reflective and colored shape pipeline, it has the benefit of providing easy to use 3D pose information which allows localization.
-.. note:: In order to get 3D Pose data about AprilTags, you are required to :ref:`calibrate your camera`.
+.. note:: In order to get 3D Pose data about AprilTags, you are required to :ref:`calibrate your camera`.
Note About Multiple Cameras and Pipelines
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -43,7 +45,7 @@ Reflective and Colored Shape Pipelines have 4 steps (represented as 4 tabs):
4. Output: Now that you have filtered all of your contours, this allows you to manipulate the detected target via orientation, the offset point, and offset.
-AprilTag Pipelines have 3 steps:
+AprilTag / ArUco Pipelines have 3 steps:
1. Input: This is the same as the above.
2. AprilTag: This step includes AprilTag-specific tuning parameters, such as decimate, blur, threads, pose iterations, and more.
diff --git a/source/docs/pipelines/images/motionblur.gif b/source/docs/pipelines/images/motionblur.gif
new file mode 100644
index 00000000..e1a6f011
Binary files /dev/null and b/source/docs/pipelines/images/motionblur.gif differ
diff --git a/source/docs/getting-started/pipeline-tuning/images/pipelinetype.png b/source/docs/pipelines/images/pipelinetype.png
similarity index 100%
rename from source/docs/getting-started/pipeline-tuning/images/pipelinetype.png
rename to source/docs/pipelines/images/pipelinetype.png
diff --git a/source/docs/pipelines/index.rst b/source/docs/pipelines/index.rst
new file mode 100644
index 00000000..cf9f5581
--- /dev/null
+++ b/source/docs/pipelines/index.rst
@@ -0,0 +1,8 @@
+Pipelines
+=========
+
+.. toctree::
+
+ about-pipelines
+ input
+ output
diff --git a/source/docs/getting-started/pipeline-tuning/input.rst b/source/docs/pipelines/input.rst
similarity index 96%
rename from source/docs/getting-started/pipeline-tuning/input.rst
rename to source/docs/pipelines/input.rst
index 8b523c4d..553dbf02 100644
--- a/source/docs/getting-started/pipeline-tuning/input.rst
+++ b/source/docs/pipelines/input.rst
@@ -1,5 +1,5 @@
-Input
-=====
+Camera Tuning / Input
+=====================
PhotonVision's "Input" tab contains settings that affect the image captured by the currently selected camera. This includes camera exposure and brightness, as well as resolution and orientation.
@@ -21,10 +21,12 @@ Camera exposure and brightness control how bright the captured image will be, al
For reflective pipelines, after adjusting exposure and brightness, the target should be lit green (or the color of the vision tracking LEDs used). The more distinct the color of the target, the more likely it will be tracked reliably.
-.. note:: Unlike with retroreflective tape, fiducial tracking is not very dependent on lighting consistency. If you have trouble detecting tags due to low light, you may want to try increasing exposure, but this will likely decrease your achievable framerate.
+.. note:: Unlike with retroreflective tape, AprilTag tracking is not very dependent on lighting consistency. If you have trouble detecting tags due to low light, you may want to try increasing exposure, but this will likely decrease your achievable framerate.
+
AprilTags and Motion Blur
^^^^^^^^^^^^^^^^^^^^^^^^^
+
For AprilTag pipelines, your goal is to reduce the "motion blur" as much as possible. Motion blur is the visual streaking/smearing on the camera stream as a result of movement of the camera or object of focus. You want to mitigate this as much as possible because your robot is constantly moving and you want to be able to read as many tags as you possibly can. The possible solutions to this include:
1. Cranking your exposure as low as it goes and increasing your gain/brightness. This will decrease the effects of motion blur and increase FPS.
diff --git a/source/docs/getting-started/pipeline-tuning/output.rst b/source/docs/pipelines/output.rst
similarity index 100%
rename from source/docs/getting-started/pipeline-tuning/output.rst
rename to source/docs/pipelines/output.rst
diff --git a/source/docs/programming/index.rst b/source/docs/programming/index.rst
index 34a94f41..38d5fd4f 100644
--- a/source/docs/programming/index.rst
+++ b/source/docs/programming/index.rst
@@ -7,4 +7,3 @@ Programming Reference
:maxdepth: 1
photonlib/index
- nt-api
diff --git a/source/docs/programming/photonlib/getting-target-data.rst b/source/docs/programming/photonlib/getting-target-data.rst
index 0b1ad9db..5342ccc2 100644
--- a/source/docs/programming/photonlib/getting-target-data.rst
+++ b/source/docs/programming/photonlib/getting-target-data.rst
@@ -88,7 +88,7 @@ You can get a list of tracked targets using the ``getTargets()``/``GetTargets()`
Getting the Best Target
-----------------------
-You can get the :ref:`best target ` using ``getBestTarget()``/``GetBestTarget()`` (Java and C++ respectively) method from the pipeline result.
+You can get the :ref:`best target ` using the ``getBestTarget()``/``GetBestTarget()`` methods (Java and C++ respectively) on the pipeline result.
.. tab-set-code::
.. code-block:: java
diff --git a/source/docs/programming/photonlib/index.rst b/source/docs/programming/photonlib/index.rst
index b5ad4e16..d1d1c381 100644
--- a/source/docs/programming/photonlib/index.rst
+++ b/source/docs/programming/photonlib/index.rst
@@ -10,5 +10,3 @@ PhotonLib: Robot Code Interface
robot-pose-estimator
driver-mode-pipeline-index
controlling-led
- simulation
- hardware-in-the-loop-sim
diff --git a/source/docs/programming/photonlib/robot-pose-estimator.rst b/source/docs/programming/photonlib/robot-pose-estimator.rst
index 99c40b89..2d98169d 100644
--- a/source/docs/programming/photonlib/robot-pose-estimator.rst
+++ b/source/docs/programming/photonlib/robot-pose-estimator.rst
@@ -33,6 +33,9 @@ Creating a ``PhotonPoseEstimator``
----------------------------------
The PhotonPoseEstimator has a constructor that takes an ``AprilTagFieldLayout`` (see above), ``PoseStrategy``, ``PhotonCamera``, and ``Transform3d``. ``PoseStrategy`` has six possible values:
+* MULTI_TAG_PNP_ON_COPROCESSOR
+ * Calculates a new robot position estimate by combining all visible tag corners. Recommended for all teams as it will be the most accurate.
+ * Must configure the AprilTagFieldLayout properly in the UI; see :ref:`here ` for more information.
* LOWEST_AMBIGUITY
* Choose the Pose with the lowest ambiguity.
* CLOSEST_TO_CAMERA_HEIGHT
@@ -43,8 +46,6 @@ The PhotonPoseEstimator has a constructor that takes an ``AprilTagFieldLayout``
* Choose the Pose which is closest to the last pose calculated.
* AVERAGE_BEST_TARGETS
* Choose the Pose which is the average of all the poses from each tag.
-* MULTI_TAG_PNP
- * Calculates a new robot position estimate by combining all visible tags.
.. tab-set-code::
.. code-block:: java
@@ -80,7 +81,7 @@ The PhotonPoseEstimator has a constructor that takes an ``AprilTagFieldLayout``
Using a ``PhotonPoseEstimator``
-------------------------------
-Calling ``update()`` on your ``PhotonPoseEstimator`` will return an ``EstimatedRobotPose``, which includes a ``Pose3d`` of the latest estimated pose (using the selected strategy) along with a ``double`` of the timestamp when the robot pose was estimated. You should be updating your `drivetrain pose estimator `_ with the result from the ``PhotonPoseEstimator`` every loop using ``addVisionMeasurement()``. See our `code example `_ for more.
+Calling ``update()`` on your ``PhotonPoseEstimator`` will return an ``EstimatedRobotPose``, which includes a ``Pose3d`` of the latest estimated pose (using the selected strategy) along with a ``double`` of the timestamp when the robot pose was estimated. You should be updating your `drivetrain pose estimator `_ with the result from the ``PhotonPoseEstimator`` every loop using ``addVisionMeasurement()``.
.. tab-set-code::
.. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/357d8a518a93f7a1f8084a79449249e613b605a7/photonlib-java-examples/apriltagExample/src/main/java/frc/robot/PhotonCameraWrapper.java
@@ -102,7 +103,7 @@ Calling ``update()`` on your ``PhotonPoseEstimator`` will return an ``EstimatedR
}
}
-You should be updating your `drivetrain pose estimator `_ with the result from the ``RobotPoseEstimator`` every loop using ``addVisionMeasurement()``. See our :ref:`code example ` for more.
+You should be updating your `drivetrain pose estimator `_ with the result from the ``PhotonPoseEstimator`` every loop using ``addVisionMeasurement()``. TODO: add example note
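+
+A minimal sketch of that loop is shown below (``poseEstimator`` is an assumed drivetrain pose estimator such as WPILib's ``SwerveDrivePoseEstimator``; recent PhotonLib versions return the estimate wrapped in an ``Optional``):
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // Called from periodic(): fold any new vision estimate into the drivetrain pose estimator.
+ var visionEst = photonPoseEstimator.update();
+ visionEst.ifPresent(est ->
+ poseEstimator.addVisionMeasurement(est.estimatedPose.toPose2d(), est.timestampSeconds));
+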
Additional ``PhotonPoseEstimator`` Methods
------------------------------------------
diff --git a/source/docs/programming/photonlib/using-target-data.rst b/source/docs/programming/photonlib/using-target-data.rst
index d7e5513c..dd47a51a 100644
--- a/source/docs/programming/photonlib/using-target-data.rst
+++ b/source/docs/programming/photonlib/using-target-data.rst
@@ -3,6 +3,38 @@ Using Target Data
A ``PhotonUtils`` class with helpful common calculations is included within ``PhotonLib`` to aid teams in using target data in order to get positional information on the field. This class contains two methods, ``calculateDistanceToTargetMeters()``/``CalculateDistanceToTarget()`` and ``estimateTargetTranslation2d()``/``EstimateTargetTranslation()`` (Java and C++ respectively).
+Estimating Field Relative Pose with AprilTags
+---------------------------------------------
+``estimateFieldToRobotAprilTag(Transform3d cameraToTarget, Pose3d fieldRelativeTagPose, Transform3d cameraToRobot)`` returns your robot's ``Pose3d`` on the field using the pose of the AprilTag relative to the camera, the pose of the AprilTag relative to the field, and the transform from the camera to the origin of the robot.
+
+.. tab-set-code::
+ .. code-block:: java
+
+ // Calculate robot's field relative pose
+ Pose3d robotPose = PhotonUtils.estimateFieldToRobotAprilTag(target.getBestCameraToTarget(), aprilTagFieldLayout.getTagPose(target.getFiducialId()).get(), cameraToRobot);
+ .. code-block:: c++
+
+ //TODO
+
+Estimating Field Relative Pose (Traditional)
+--------------------------------------------
+
+You can get your robot's ``Pose2d`` on the field using various camera data: target yaw, gyro angle, target pose, and camera position. This method estimates the target's relative position using ``estimateCameraToTargetTranslation`` (which uses pitch and yaw to estimate range and heading), and the robot's gyro to estimate the rotation of the target.
+
+.. tab-set-code::
+ .. code-block:: java
+
+ // Calculate robot's field relative pose
+ Pose2d robotPose = PhotonUtils.estimateFieldToRobot(
+ kCameraHeight, kTargetHeight, kCameraPitch, kTargetPitch, Rotation2d.fromDegrees(-target.getYaw()), gyro.getRotation2d(), targetPose, cameraToRobot);
+
+ .. code-block:: c++
+
+ // Calculate robot's field relative pose
+ frc::Pose2d robotPose = photonlib::EstimateFieldToRobot(
+ kCameraHeight, kTargetHeight, kCameraPitch, kTargetPitch, frc::Rotation2d(units::degree_t(-target.GetYaw())), gyro.GetRotation2d(), targetPose, cameraToRobot);
+
+
Calculating Distance to Target
------------------------------
If your camera is at a fixed height on your robot and the height of the target is fixed, you can calculate the distance to the target based on your camera's pitch and the pitch to the target.
@@ -52,37 +84,6 @@ You can get a `translation ` which can be done through the cameras tab.
+In 3D mode, the SolvePnP algorithm is used to compute the position and rotation of the target relative to the robot. This requires your :ref:`camera to be calibrated `, which can be done through the cameras tab.
The target model dropdown is used to select the target model used to compute target position. This should match the target your camera will be tracking.
diff --git a/source/docs/getting-started/pipeline-tuning/reflectiveAndShape/contour-filtering.rst b/source/docs/reflectiveAndShape/contour-filtering.rst
similarity index 100%
rename from source/docs/getting-started/pipeline-tuning/reflectiveAndShape/contour-filtering.rst
rename to source/docs/reflectiveAndShape/contour-filtering.rst
diff --git a/source/docs/getting-started/pipeline-tuning/reflectiveAndShape/images/hsl_top.png b/source/docs/reflectiveAndShape/images/hsl_top.png
similarity index 100%
rename from source/docs/getting-started/pipeline-tuning/reflectiveAndShape/images/hsl_top.png
rename to source/docs/reflectiveAndShape/images/hsl_top.png
diff --git a/source/docs/getting-started/pipeline-tuning/reflectiveAndShape/images/pumpkin.png b/source/docs/reflectiveAndShape/images/pumpkin.png
similarity index 100%
rename from source/docs/getting-started/pipeline-tuning/reflectiveAndShape/images/pumpkin.png
rename to source/docs/reflectiveAndShape/images/pumpkin.png
diff --git a/source/docs/getting-started/pipeline-tuning/reflectiveAndShape/images/triangle.png b/source/docs/reflectiveAndShape/images/triangle.png
similarity index 100%
rename from source/docs/getting-started/pipeline-tuning/reflectiveAndShape/images/triangle.png
rename to source/docs/reflectiveAndShape/images/triangle.png
diff --git a/source/docs/getting-started/pipeline-tuning/reflectiveAndShape/index.rst b/source/docs/reflectiveAndShape/index.rst
similarity index 56%
rename from source/docs/getting-started/pipeline-tuning/reflectiveAndShape/index.rst
rename to source/docs/reflectiveAndShape/index.rst
index 6b893f88..53f0c5a2 100644
--- a/source/docs/getting-started/pipeline-tuning/reflectiveAndShape/index.rst
+++ b/source/docs/reflectiveAndShape/index.rst
@@ -1,5 +1,5 @@
-Reflective / Colored Shape Tuning
-=================================
+Colored Shape Detection
+=======================
.. toctree::
:maxdepth: 0
diff --git a/source/docs/getting-started/pipeline-tuning/reflectiveAndShape/thresholding.rst b/source/docs/reflectiveAndShape/thresholding.rst
similarity index 83%
rename from source/docs/getting-started/pipeline-tuning/reflectiveAndShape/thresholding.rst
rename to source/docs/reflectiveAndShape/thresholding.rst
index e7acb70e..5e9826a6 100644
--- a/source/docs/getting-started/pipeline-tuning/reflectiveAndShape/thresholding.rst
+++ b/source/docs/reflectiveAndShape/thresholding.rst
@@ -1,7 +1,7 @@
Thresholding
============
-In this step, we want to tune our HSV thresholds such that only the goal color remains after the thresholding. The `HSV color representation `__ is similar to RGB in that it represents colors. However, HSV represents colors with hue, saturation and value components. Hue refers to the color, while saturation and value describe its richness and brightness.
+For colored shape detection, we want to tune our HSV thresholds such that only the goal color remains after the thresholding. The `HSV color representation `__ is similar to RGB in that it represents colors. However, HSV represents colors with hue, saturation and value components. Hue refers to the color, while saturation and value describe its richness and brightness.
In PhotonVision, HSV thresholding is available in the "Threshold" tab.
diff --git a/source/docs/hardware/Settings.rst b/source/docs/settings.rst
similarity index 97%
rename from source/docs/hardware/Settings.rst
rename to source/docs/settings.rst
index ae1cd524..015519f0 100644
--- a/source/docs/hardware/Settings.rst
+++ b/source/docs/settings.rst
@@ -1,7 +1,7 @@
Settings
========
-.. image:: images/settings.png
+.. image:: assets/settings.png
General
^^^^^^^
diff --git a/source/docs/programming/photonlib/images/SimArchitecture.svg b/source/docs/simulation/diagrams/SimArchitecture-deprecated.drawio.svg
similarity index 100%
rename from source/docs/programming/photonlib/images/SimArchitecture.svg
rename to source/docs/simulation/diagrams/SimArchitecture-deprecated.drawio.svg
diff --git a/source/docs/simulation/diagrams/SimArchitecture.drawio.svg b/source/docs/simulation/diagrams/SimArchitecture.drawio.svg
new file mode 100644
index 00000000..69d941b9
--- /dev/null
+++ b/source/docs/simulation/diagrams/SimArchitecture.drawio.svg
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/source/docs/programming/photonlib/hardware-in-the-loop-sim.rst b/source/docs/simulation/hardware-in-the-loop-sim.rst
similarity index 93%
rename from source/docs/programming/photonlib/hardware-in-the-loop-sim.rst
rename to source/docs/simulation/hardware-in-the-loop-sim.rst
index ddcd9280..02e72b15 100644
--- a/source/docs/programming/photonlib/hardware-in-the-loop-sim.rst
+++ b/source/docs/simulation/hardware-in-the-loop-sim.rst
@@ -3,7 +3,7 @@ Hardware In The Loop Simulation
Hardware in the loop simulation uses a physical device, such as a supported co-processor running PhotonVision, to enhance simulation capabilities. This is useful for developing and validating code before the camera is attached to a robot, as well as reducing the work required to use WPILib simulation with PhotonVision.
-The first step is to install PhotonVision on your target device. Instructions can be found `here `_ for all devices.
+The first step is to install PhotonVision on your target device. Instructions can be found :ref:`here ` for all devices.
A small amount of configuration is required after installation. From the PhotonVision UI, go to the sidebar and select the Settings option. Within the settings, turn on "Run NetworkTables Server".
diff --git a/source/docs/simulation/images/SimArchitecture.svg b/source/docs/simulation/images/SimArchitecture.svg
new file mode 100644
index 00000000..6eff0a73
--- /dev/null
+++ b/source/docs/simulation/images/SimArchitecture.svg
@@ -0,0 +1,3 @@
+
+
+
\ No newline at end of file
diff --git a/source/docs/simulation/images/SimExampleField.png b/source/docs/simulation/images/SimExampleField.png
new file mode 100644
index 00000000..833f1b8b
Binary files /dev/null and b/source/docs/simulation/images/SimExampleField.png differ
diff --git a/source/docs/simulation/images/SimExampleFrame.png b/source/docs/simulation/images/SimExampleFrame.png
new file mode 100644
index 00000000..6fb76a17
Binary files /dev/null and b/source/docs/simulation/images/SimExampleFrame.png differ
diff --git a/source/docs/programming/photonlib/images/hardware-in-the-loop-sim.png b/source/docs/simulation/images/hardware-in-the-loop-sim.png
similarity index 100%
rename from source/docs/programming/photonlib/images/hardware-in-the-loop-sim.png
rename to source/docs/simulation/images/hardware-in-the-loop-sim.png
diff --git a/source/docs/programming/photonlib/images/nt-server-toggle.png b/source/docs/simulation/images/nt-server-toggle.png
similarity index 100%
rename from source/docs/programming/photonlib/images/nt-server-toggle.png
rename to source/docs/simulation/images/nt-server-toggle.png
diff --git a/source/docs/simulation/index.rst b/source/docs/simulation/index.rst
new file mode 100644
index 00000000..1ec3a38d
--- /dev/null
+++ b/source/docs/simulation/index.rst
@@ -0,0 +1,10 @@
+Simulation
+==========
+
+.. toctree::
+ :maxdepth: 0
+ :titlesonly:
+
+ simulation
+ simulation-deprecated
+ hardware-in-the-loop-sim
diff --git a/source/docs/programming/photonlib/simulation.rst b/source/docs/simulation/simulation-deprecated.rst
similarity index 51%
rename from source/docs/programming/photonlib/simulation.rst
rename to source/docs/simulation/simulation-deprecated.rst
index eb27a84e..d47d625d 100644
--- a/source/docs/programming/photonlib/simulation.rst
+++ b/source/docs/simulation/simulation-deprecated.rst
@@ -1,11 +1,14 @@
-Simulation Support in PhotonLib
-===============================
+Simulation Support in PhotonLib (Deprecated)
+============================================
+
+.. attention:: This page details the pre-2024 simulation support. For current Java simulation support, see :doc:`/docs/simulation/simulation`.
What Is Supported?
------------------
-PhotonLib supports simulation of a Photon Vision camera and processor moving about a field on a robot.
-You can use this to help validate your robot code's behavior in simulation without special wrappers or additional hardware.
+PhotonLib supports simulation of a camera and coprocessor running PhotonVision moving about a field on a robot.
+
+You can use this to help validate your robot code's behavior in simulation without needing a physical robot.
Simulation Vision World Model
-----------------------------
@@ -21,73 +24,71 @@ Targets are considered in view if:
3) The target's in-image pixel size is greater than ``minTargetArea``
4) The distance from the camera to the target is less than ``maxLEDRange``
-.. warning:: Not all network tables objects are updated in simulation. The interaction through PhotonLib remains the same. Actual camera images are also not simulated.
+.. warning:: Not all network tables objects are updated in simulation. The interaction through PhotonLib remains the same. Actual camera images are also not simulated.
Latency of processing is not yet modeled.
-.. image:: images/SimArchitecture.svg
-
-
+.. image:: diagrams/SimArchitecture-deprecated.drawio.svg
+ :alt: A diagram comparing the architecture of a real PhotonVision process to a simulated one.
Simulated Vision System
-----------------------
-A ``SimVisionSystem`` represents the camera, coprocessor, and PhotonVision software moving around on the field.
+A ``SimVisionSystem`` represents the camera and coprocessor running PhotonVision moving around on the field.
It requires a number of pieces of configuration to accurately simulate your physical setup. Match them to your configuration in PhotonVision, and to your robot's physical dimensions.
.. tab-set-code::
- .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java
- :language: java
- :lines: 73-93
+ .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java
+ :language: java
+ :lines: 73-93
-
-After declaring the system, you should create and add one ``SimVisionTarget`` per target on the field you are attempting to detect.
+After declaring the system, you should create and add one ``SimVisionTarget`` per target you are attempting to detect.
.. tab-set-code::
- .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java
- :language: java
- :lines: 95-111
+ .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java
+ :language: java
+ :lines: 95-111
Finally, while running the simulation, process simulated camera frames by providing the robot's pose to the system.
.. tab-set-code::
- .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java
- :language: java
- :lines: 138-139
+ .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java
+ :language: java
+ :lines: 138-139
This will cause most NetworkTables fields to update properly, representing any targets that are in view of the robot.
Robot software which uses PhotonLib to interact with a camera running PhotonVision should work the same as though a real camera was hooked up and active.
-
Raw-Data Approach
-----------------
-Advanced users may wish to directly provide target information based on an existing detailed simulation.
+Users may wish to directly provide target information based on an existing detailed simulation.
A ``SimPhotonCamera`` can be created for this purpose. It provides an interface where the user can supply target data via a list of ``PhotonTrackedTarget`` objects.
.. tab-set-code::
+
.. code-block:: java
- @Override
- public void simulationInit() {
- // ...
- cam = new SimPhotonCamera("MyCamera");
- // ...
- }
-
- @Override
- public void simulationPeriodic() {
- // ...
- ArrayList visibleTgtList = new ArrayList();
- visibleTgtList.add(new PhotonTrackedTarget(yawDegrees, pitchDegrees, area, skew, camToTargetTrans)); // Repeat for each target that you see
- cam.submitProcessedFrame(0.0, visibleTgtList);
- // ...
- }
+ @Override
+ public void simulationInit() {
+ // ...
+ cam = new SimPhotonCamera("MyCamera");
+ // ...
+ }
+
+ @Override
+ public void simulationPeriodic() {
+ // ...
+ ArrayList visibleTgtList = new ArrayList();
+ visibleTgtList.add(new PhotonTrackedTarget(yawDegrees, pitchDegrees, area, skew, camToTargetTrans)); // Repeat for each target that you see
+ cam.submitProcessedFrame(0.0, visibleTgtList);
+ // ...
+ }
Note that while there is less code and configuration required to get basic data into the simulation, this approach requires the user to implement much more code on their end to calculate the relative positions of the robot and target. If you already have this, the raw interface may be helpful. However, if you don't, you'll likely want to look at the Simulated Vision System first.
diff --git a/source/docs/simulation/simulation.rst b/source/docs/simulation/simulation.rst
new file mode 100644
index 00000000..4cc23295
--- /dev/null
+++ b/source/docs/simulation/simulation.rst
@@ -0,0 +1,226 @@
+Simulation Support in PhotonLib
+===============================
+
+.. attention:: This page details the current simulation support for Java. For other languages, see :doc:`/docs/simulation/simulation-deprecated`.
+
+What Is Simulated?
+------------------
+
+Simulation is a powerful tool for validating robot code without access to a physical robot. Read more about `simulation in WPILib `_.
+
+PhotonLib can simulate cameras on the field and generate target data approximating what would be seen in reality. This simulation attempts to include the following:
+
+- Camera Properties
+ - Field of view
+ - Lens distortion
+ - Image noise
+ - Framerate
+ - Latency
+- Target Data
+ - Detected / minimum-area-rectangle corners
+ - Center yaw/pitch
+ - Contour image area percentage
+ - Fiducial ID
+ - Fiducial ambiguity
+ - Fiducial solvePNP transform estimation
+- Camera Raw/Processed Streams (grayscale)
+
+.. note::
+
+ Simulation does NOT include the following:
+
+ - Full physical camera/world simulation (targets are automatically thresholded)
+ - Image Thresholding Process (camera gain, brightness, etc)
+ - Pipeline switching
+ - Snapshots
+
+This scope was chosen to balance fidelity of the simulation with the ease of setup, in a way that would best benefit most teams.
+
+.. image:: diagrams/SimArchitecture.drawio.svg
+ :alt: A diagram comparing the architecture of a real PhotonVision process to a simulated one.
+
+Drivetrain Simulation Prerequisite
+----------------------------------
+
+A prerequisite for simulating vision frames is knowing where the camera is on the field. To utilize PhotonVision simulation, you'll need to supply the simulated robot pose periodically. This requires drivetrain simulation for your robot project if you want to generate camera frames as your robot moves around the field.
+
+References for using PhotonVision simulation with drivetrain simulation can be found in the `PhotonLib Java Examples `_ for both a differential drivetrain and a swerve drive.
+
+.. important:: The simulated drivetrain pose must be separate from the drivetrain estimated pose if a pose estimator is utilized.
+
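+As a rough sketch (``driveSim`` here is an assumed WPILib ``DifferentialDrivetrainSim``; a swerve project would use its own simulated ground-truth pose):
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // The drivetrain simulation's ground-truth pose is what gets fed to the vision
+ // simulation (see update() below), NOT the estimate from your pose estimator.
+ Pose2d simulatedGroundTruthPose = driveSim.getPose();
+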
+Vision System Simulation
+------------------------
+
+A ``VisionSystemSim`` represents the simulated world for one or more cameras, and contains the vision targets they can see. It is constructed with a unique label:
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // A vision system sim labelled as "main" in NetworkTables
+ VisionSystemSim visionSim = new VisionSystemSim("main");
+
+PhotonLib will use this label to put a ``Field2d`` widget on NetworkTables at `/VisionSystemSim-[label]/Sim Field`. This label does not need to match any camera name or pipeline name in PhotonVision.
+
+Vision targets require a ``TargetModel``, which describes the shape of the target. For AprilTags, PhotonLib provides ``TargetModel.kAprilTag16h5`` for the tags used in 2023, and ``TargetModel.kAprilTag36h11`` for the tags used starting in 2024. For other target shapes, convenience constructors exist for spheres, cuboids, and planar rectangles. For example, a planar rectangle can be created with:
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // A 0.5 x 0.25 meter rectangular target
+ TargetModel targetModel = new TargetModel(0.5, 0.25);
+
+These ``TargetModel`` instances are paired with a target pose to create a ``VisionTargetSim``. A ``VisionTargetSim`` is added to the ``VisionSystemSim`` to become visible to all of its cameras.
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // The pose of where the target is on the field.
+ // Its rotation determines where "forward" or the target x-axis points.
+ // Let's say this target is flat against the far wall center, facing the blue driver stations.
+ Pose3d targetPose = new Pose3d(16, 4, 2, new Rotation3d(0, 0, Math.PI));
+ // The given target model at the given pose
+ VisionTargetSim visionTarget = new VisionTargetSim(targetPose, targetModel);
+
+ // Add this vision target to the vision system simulation to make it visible
+ visionSim.addVisionTargets(visionTarget);
+
+.. note:: The pose of a ``VisionTargetSim`` object can be updated to simulate moving targets. Note, however, that this will break latency simulation for that target.
+
+For convenience, an ``AprilTagFieldLayout`` can also be added to automatically create a target for each of its AprilTags.
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // The layout of AprilTags which we want to add to the vision system
+ AprilTagFieldLayout tagLayout = AprilTagFieldLayout.loadFromResource(AprilTagFields.k2023ChargedUp.m_resourceFile);
+
+ visionSim.addAprilTags(tagLayout);
+
+.. note:: The poses of the AprilTags from this layout depend on its current alliance origin (e.g. blue or red). If this origin is changed later, the targets will have to be cleared from the ``VisionSystemSim`` and re-added.
+
+Camera Simulation
+-----------------
+
+Now that we have a simulation world with vision targets, we can add simulated cameras to view it.
+
+Before adding a simulated camera, we need to define its properties. This is done with the ``SimCameraProperties`` class:
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // The simulated camera properties
+ SimCameraProperties cameraProp = new SimCameraProperties();
+
+By default, this will create a 960 x 720 resolution camera with a 90 degree diagonal FOV (field of view) and no noise, distortion, or latency. If we want to change these properties, we can do so:
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // A 640 x 480 camera with a 100 degree diagonal FOV.
+ cameraProp.setCalibration(640, 480, Rotation2d.fromDegrees(100));
+ // Approximate detection noise with average and standard deviation error in pixels.
+ cameraProp.setCalibError(0.25, 0.08);
+ // Set the camera image capture framerate (Note: this is limited by robot loop rate).
+ cameraProp.setFPS(20);
+ // The average and standard deviation in milliseconds of image data latency.
+ cameraProp.setAvgLatencyMs(35);
+ cameraProp.setLatencyStdDevMs(5);
+
+These properties are used in a ``PhotonCameraSim``, which handles generating captured frames of the field from the simulated camera's perspective, and calculating the target data which is sent to the ``PhotonCamera`` being simulated.
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // The PhotonCamera used in the real robot code.
+ PhotonCamera camera = new PhotonCamera("cameraName");
+
+ // The simulation of this camera. Its values used in real robot code will be updated.
+ PhotonCameraSim cameraSim = new PhotonCameraSim(camera, cameraProp);
+
+The ``PhotonCameraSim`` can now be added to the ``VisionSystemSim``. We have to define a robot-to-camera transform, which describes where the camera is relative to the robot pose (this can be measured in CAD or by hand).
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // Our camera is mounted 0.1 meters forward and 0.5 meters up from the robot pose,
+ // (Robot pose is considered the center of rotation at the floor level, or Z = 0)
+ Translation3d robotToCameraTrl = new Translation3d(0.1, 0, 0.5);
+ // and pitched 15 degrees up.
+ Rotation3d robotToCameraRot = new Rotation3d(0, Math.toRadians(-15), 0);
+ Transform3d robotToCamera = new Transform3d(robotToCameraTrl, robotToCameraRot);
+
+ // Add this camera to the vision system simulation with the given robot-to-camera transform.
+ visionSim.addCamera(cameraSim, robotToCamera);
+
+.. important:: You may add multiple cameras to one ``VisionSystemSim``, but not one camera to multiple ``VisionSystemSim``. All targets in the ``VisionSystemSim`` will be visible to all its cameras.
+
+If the camera is mounted on a mobile mechanism (like a turret) this transform can be updated in a periodic loop.
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // The turret the camera is mounted on is rotated 5 degrees
+ Rotation3d turretRotation = new Rotation3d(0, 0, Math.toRadians(5));
+ robotToCamera = new Transform3d(
+ robotToCameraTrl.rotateBy(turretRotation),
+ robotToCameraRot.rotateBy(turretRotation));
+ visionSim.adjustCamera(cameraSim, robotToCamera);
+
+Updating The Simulation World
+-----------------------------
+
+To update the ``VisionSystemSim``, we simply have to pass in the simulated robot pose periodically (in ``simulationPeriodic()``).
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // Update with the simulated drivetrain pose. This should be called every loop in simulation.
+ visionSim.update(robotPoseMeters);
+
+Targets and cameras can be added and removed, and camera properties can be changed at any time.
+
+Visualizing Results
+-------------------
+
+Each ``VisionSystemSim`` has its own built-in ``Field2d`` for displaying object poses in the simulation world such as the robot, simulated cameras, and actual/measured target poses.
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // Get the built-in Field2d used by this VisionSystemSim
+ visionSim.getDebugField();
+
+.. figure:: images/SimExampleField.png
+
+ *A* ``VisionSystemSim``\ *'s internal* ``Field2d`` *customized with target images and colors, as seen in the* `swervedriveposeestsim `_ *example.*
+
+A ``PhotonCameraSim`` can also draw and publish generated camera frames to a MJPEG stream similar to an actual PhotonVision process.
+
+.. tab-set-code::
+
+ .. code-block:: java
+
+ // Enable the raw and processed streams. These are enabled by default.
+ cameraSim.enableRawStream(true);
+ cameraSim.enableProcessedStream(true);
+
+ // Enable drawing a wireframe visualization of the field to the camera streams.
+ // This is extremely resource-intensive and is disabled by default.
+ cameraSim.enableDrawWireframe(true);
+
+These streams follow the port order mentioned in :ref:`docs/installation/networking:Camera Stream Ports`. For example, a single simulated camera will have its raw stream at ``localhost:1181`` and processed stream at ``localhost:1182``, which can also be found in the CameraServer tab of Shuffleboard like a normal camera stream.
+
+.. figure:: images/SimExampleFrame.png
+
+ *A frame from the processed stream of a simulated camera viewing some 2023 AprilTags with the field wireframe enabled, as seen in the* `swervedriveposeestsim example `_.
diff --git a/source/docs/troubleshooting/common-errors.rst b/source/docs/troubleshooting/common-errors.rst
index bff6b154..85a09e70 100644
--- a/source/docs/troubleshooting/common-errors.rst
+++ b/source/docs/troubleshooting/common-errors.rst
@@ -31,11 +31,11 @@ Camera won't show up
^^^^^^^^^^^^^^^^^^^^
Try these steps to :ref:`troubleshoot your camera connection `.
-If you are using a USB camera, it is possible your USB Camera isn't supported by CSCore and therefore won't work with PhotonVision. See :ref:`supported hardware page for more information `, or the above Camera Troubleshooting page for more information on determining this locally.
+If you are using a USB camera, it is possible your USB Camera isn't supported by CSCore and therefore won't work with PhotonVision. See :ref:`supported hardware page for more information `, or the above Camera Troubleshooting page for more information on determining this locally.
Camera is consistently returning incorrect values when in 3D mode
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Read the tips on the :ref:`camera calibration page`, follow the advice there, and redo the calibration.
+Read the tips on the :ref:`camera calibration page`, follow the advice there, and redo the calibration.
Not getting data from PhotonLib
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/source/docs/troubleshooting/networking-troubleshooting.rst b/source/docs/troubleshooting/networking-troubleshooting.rst
index 901dbb97..adcaeaaf 100644
--- a/source/docs/troubleshooting/networking-troubleshooting.rst
+++ b/source/docs/troubleshooting/networking-troubleshooting.rst
@@ -1,7 +1,7 @@
Networking Troubleshooting
==========================
-Before reading further, ensure that you follow all the recommendations :ref:`in our networking section `. You should follow these guidelines in order for PhotonVision to work properly; other networking setups are not officially supported.
+Before reading further, ensure that you follow all the recommendations :ref:`in our networking section `. You should follow these guidelines in order for PhotonVision to work properly; other networking setups are not officially supported.
Checklist
@@ -9,7 +9,7 @@ Checklist
A few issues make up the majority of support requests. Run through this checklist quickly to catch some common mistakes.
-- Is your camera connected to the robot's radio through a :ref:`network switch `?
+- Is your camera connected to the robot's radio through a :ref:`network switch `?
- Ethernet straight from a laptop to a coprocessor will not work (most likely), due to the unreliability of link-local connections.
- Even if there's a switch between your laptop and coprocessor, you'll still want a radio or router in the loop somehow.
- The FRC radio is the *only* router we will officially support due to the innumerable variations between routers.
@@ -19,8 +19,8 @@ A few issues make up the majority of support requests. Run through this checklis
- This is due to Limelight-specific hardware configuration that makes the image incompatible with stock Raspberry Pi devices.
- Is your robot code using a **2023** version of WPILib, and is your coprocessor using the most up to date **2023** release?
- 2022 and 2023 versions of either cannot be mix-and-matched!
- - Your PhotonVision version can be checked on the :ref:`settings tab`.
-- Is your team number correctly set on the :ref:`settings tab`?
+ - Your PhotonVision version can be checked on the :ref:`settings tab`.
+- Is your team number correctly set on the :ref:`settings tab`?
photonvision.local Not Found
diff --git a/source/index.rst b/source/index.rst
index 8078b158..f6ee923c 100644
--- a/source/index.rst
+++ b/source/index.rst
@@ -9,7 +9,7 @@ Content
.. grid:: 2
.. grid-item-card:: Getting Started
- :link: docs/getting-started/index
+ :link: docs/installation/index
:link-type: doc
Get started with installing PhotonVision, creating a pipeline, and tuning it for usage in competitions.
@@ -68,22 +68,44 @@ License
PhotonVision is licensed under the `GNU GPL v3 `_.
-Sitemap
--------
.. toctree::
- :maxdepth: 2
-
+ :maxdepth: 0
+ :caption: Getting Started
+ :hidden:
- docs/getting-started/description
- docs/getting-started/april-tags
- docs/getting-started/installation/index
- docs/getting-started/pipeline-tuning/index
+ docs/description
docs/hardware/index
+ docs/installation/index
+ docs/settings
+
+.. toctree::
+ :maxdepth: 0
+ :caption: Pipeline Tuning and Calibration
+ :hidden:
+
+ docs/pipelines/index
+ docs/apriltag-pipelines/index
+ docs/reflectiveAndShape/index
+ docs/calibration/calibration
+
+.. toctree::
+ :maxdepth: 1
+ :caption: Programming Reference
+ :hidden:
+
docs/programming/photonlib/index
- docs/programming/nt-api
+ docs/simulation/index
docs/integration/index
docs/examples/index
- docs/getting-started/best-practices
+
+.. toctree::
+ :maxdepth: 1
+ :caption: Additional Resources
+ :hidden:
+
docs/troubleshooting/index
+ docs/additional-resources/best-practices
+ docs/additional-resources/config
+ docs/additional-resources/nt-api
docs/contributing/index