diff --git a/.github/workflows/link-check.yaml b/.github/workflows/link-check.yaml
index 84b87c36..f7a1237c 100644
--- a/.github/workflows/link-check.yaml
+++ b/.github/workflows/link-check.yaml
@@ -22,12 +22,12 @@ jobs:
run: pip install -r docs/requirements.txt
- name: link-check
- run: make -C docs/ linkcheck SPHINXOPTS="-W --keep-going -n -q"
+ run: make -C docs/ linkcheckdiff SPHINXOPTS="-W --keep-going -n -q"
       - name: Archive Log
if: ${{ failure() }}
uses: actions/upload-artifact@v4
with:
name: LINKCHECK--1
- path: docs/build/linkcheck/output.txt
+ path: docs/build/linkcheckdiff/output.json
retention-days: 7
diff --git a/.github/workflows/pull-request.yaml b/.github/workflows/pull-request.yaml
index 9728b523..44707eea 100644
--- a/.github/workflows/pull-request.yaml
+++ b/.github/workflows/pull-request.yaml
@@ -133,6 +133,7 @@ jobs:
if unzip -t linkcheck.zip >/dev/null 2>&1; then
unzip -q linkcheck.zip -d linkcheck_contents
echo "Successfully downloaded and unzipped previous LINKCHECK artifact"
+ mv linkcheck_contents/output.json docs/main-output.json
else
echo "Downloaded file is not a valid zip. Skipping unzip."
rm linkcheck.zip
@@ -141,38 +142,14 @@ jobs:
- name: link-check
run: |
- make -C docs/ linkcheck SPHINXOPTS="-W --keep-going -n -q" || true
-
- - name: Compare outputs
- run: |
- echo "Archived Errors"
- cat linkcheck_contents/output.txt
- echo "PR Errors"
- cat docs/build/linkcheck/output.txt
-
- if [ -f linkcheck_contents/output.txt ]; then
- sort linkcheck_contents/output.txt > old_sorted.txt
- sort docs/build/linkcheck/output.txt > new_sorted.txt
- new_errors=$(comm -13 old_sorted.txt new_sorted.txt)
- if [ -n "$new_errors" ]; then
- echo "New errors found:"
- echo "$new_errors"
- exit 1
- else
- echo "No new errors found."
- fi
- else
- echo "No previous output.txt found. Treating all current errors as new."
- cat docs/build/linkcheck/output.txt
- [ -s docs/build/linkcheck/output.txt ] && exit 1 || exit 0
- fi
+ make -C docs/ linkcheckdiff SPHINXOPTS="-W --keep-going -n -q"
- name: Archive Log
if: always()
uses: actions/upload-artifact@v4
with:
name: LINKCHECK-PR-${{ github.event.number }}-${{ github.run_attempt }}
- path: docs/build/linkcheck/output.txt
+ path: docs/build/linkcheckdiff/output.json
retention-days: 7
image-check:
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 24b7fe8f..4fa6b39c 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -10,3 +10,4 @@ sphinx-sitemap==2.3.0
python-git-info==0.8.3
sphinxcontrib-mermaid==0.9.2
sphinxext-rediraffe==0.2.7
+git+https://github.com/FIRST-Tech-Challenge/ftcdocs-helper@main#subdirectory=linkcheckdiff
diff --git a/docs/source/color_processing/color-blob-concepts/color-blob-concepts.rst b/docs/source/color_processing/color-blob-concepts/color-blob-concepts.rst
new file mode 100644
index 00000000..1abad05f
--- /dev/null
+++ b/docs/source/color_processing/color-blob-concepts/color-blob-concepts.rst
@@ -0,0 +1,147 @@
+Color Blob Concepts
+===================
+
+Color Blobs
+-----------
+
+An image can be evaluated by its **groupings of similar colors**.
+
+The smallest unit of any digital image is a **pixel**: a tiny square of one
+particular color.
+
+Each grouping or cluster of similar-colored pixels is called a **Blob**, which
+can be irregular in size and shape.
+
+Forming a Blob is done automatically by the software. It seeks pixels of
+similar color that are **contiguous** -- touching each other along an edge, not
+just at a corner.
+
+.. figure:: images/10-Blobs-formation.png
+ :width: 75%
+ :align: center
+ :alt: Blob Formation Visualization
+
+ Blob Formation Visualization
+
+There are 9 Blobs here, not 4. Some are very small, just one pixel each.
+
+The 5 pixels at top right, for example, are not contiguous (edges joined), so
+they are not joined to form a larger Blob.
+
+The above simple example has only 2 colors: black and white. For FTC, the
+definition of "similar" colors is a range specified by you.
+
+.. figure:: images/20-Blobs-red-chair.png
+ :width: 75%
+ :align: center
+ :alt: Defining Blobs from an image of a red chair
+
+ Blobs from a Red Chair image
+
+In the above example, the chair surfaces are not **exactly** the same shade of
+red. But with a **target** definition "close to red" or "mostly red", the
+software can form reasonable Blobs for further processing.
+
+Color Processing
+----------------
+
+Now let's point the camera at an INTO THE DEEP game element called a
+**Sample**.
+
+.. figure:: images/30-Blobs-blue-basic.png
+ :width: 75%
+ :align: center
+ :alt: Detecting Blob from a Blue SAMPLE
+
+ Blob from a Blue SAMPLE
+
+Here the software was told to seek shades of blue. The orange rectangle
+encloses a Blob of blue color.
+
+But why doesn't the rectangle enclose the entire game piece? The software is
+processing only a certain **Region of Interest** or ROI. That's the white
+rectangle; its size and location are specified by you.
+
+Anything outside the ROI will not be considered part of any Blob that is
+detected. This can help you avoid detecting (unwanted) background objects.
+
+In the example above, the Blob was actually outlined in teal (blue-green
+color), very hard to see. Let's try another image:
+
+.. figure:: images/40-Blobs-single.png
+ :width: 75%
+ :align: center
+ :alt: Finding Teal Outline
+
+ Teal Outline of Blue Blob
+
+Now the teal outline of the blue Blob can be seen. Its shape is irregular,
+which can be difficult for your OpMode to evaluate.
+
+boxFit Rectangles
+-----------------
+
+The orange rectangle is drawn automatically by OpenCV, to give your OpMode a
+simpler geometric shape that represents the Blob. It's not **exactly** like
+the actual Blob, but hopefully still useful.
+
+The orange rectangle, called the **boxFit**, fits tightly around the extreme
+edges of the Blob. The boxFit is **not** required to stay inside the Region of
+Interest. In the above case, the best-fitting rectangle happens to stay inside
+the ROI.
+
+But here's another case:
+
+.. figure:: images/50-Blobs-tilted.png
+ :width: 75%
+ :align: center
+ :alt: Showing Boxfit position
+
+ New boxFit position
+
+Look very closely for the teal outline of the Blob, with its very rough lower
+edge.
+
+Here, the best-fitting rectangle (boxFit) is **tilted**, and is not contained
+inside the ROI.
+
+OpenCV provides all data for the boxFit, including its corner points, size, and
+tilt angle. It can even provide a fitted horizontal version of the boxFit
+rectangle, if you prefer not to handle a tilted boxFit.
+
+Now things get a bit more complicated:
+
+.. figure:: images/60-Blobs-two.png
+ :width: 75%
+ :align: center
+ :alt: Detecting two blobs
+
+ Detecting two blobs
+
+OpenCV detected **two Blobs**, each with a teal outline and each with a boxFit.
+
+Your OpMode will need to "decide" which boxFit is important and which to
+ignore. Fortunately, OpenCV provides tools to **filter out** certain unwanted
+results. After filtering, your OpMode can **sort** the remaining results, to
+focus on the highest priority.
+
+With these tools, your OpMode could handle even a "busy" result like this one:
+
+.. figure:: images/70-Blobs-many.png
+ :width: 75%
+ :align: center
+ :alt: Many blob detections
+
+ Many Blob Detections
+
+Your programming tasks will include:
+
+* determine which boxFit is most relevant,
+* evaluate its data, and
+* take robot action accordingly.
+
+Now try the Sample OpMode for the :doc:`Color Locator <../color-locator-discover/color-locator-discover>` processor.
+
+============
+
+*Questions, comments and corrections to westsiderobotics@verizon.net*
diff --git a/docs/source/color_processing/color-blob-concepts/images/10-Blobs-formation.png b/docs/source/color_processing/color-blob-concepts/images/10-Blobs-formation.png
new file mode 100644
index 00000000..9d388cdb
Binary files /dev/null and b/docs/source/color_processing/color-blob-concepts/images/10-Blobs-formation.png differ
diff --git a/docs/source/color_processing/color-blob-concepts/images/20-Blobs-red-chair.png b/docs/source/color_processing/color-blob-concepts/images/20-Blobs-red-chair.png
new file mode 100644
index 00000000..a2b1e78b
Binary files /dev/null and b/docs/source/color_processing/color-blob-concepts/images/20-Blobs-red-chair.png differ
diff --git a/docs/source/color_processing/color-blob-concepts/images/30-Blobs-blue-basic.png b/docs/source/color_processing/color-blob-concepts/images/30-Blobs-blue-basic.png
new file mode 100644
index 00000000..4c742258
Binary files /dev/null and b/docs/source/color_processing/color-blob-concepts/images/30-Blobs-blue-basic.png differ
diff --git a/docs/source/color_processing/color-blob-concepts/images/40-Blobs-single.png b/docs/source/color_processing/color-blob-concepts/images/40-Blobs-single.png
new file mode 100644
index 00000000..c0bd4102
Binary files /dev/null and b/docs/source/color_processing/color-blob-concepts/images/40-Blobs-single.png differ
diff --git a/docs/source/color_processing/color-blob-concepts/images/50-Blobs-tilted.png b/docs/source/color_processing/color-blob-concepts/images/50-Blobs-tilted.png
new file mode 100644
index 00000000..9c9b070d
Binary files /dev/null and b/docs/source/color_processing/color-blob-concepts/images/50-Blobs-tilted.png differ
diff --git a/docs/source/color_processing/color-blob-concepts/images/60-Blobs-two.png b/docs/source/color_processing/color-blob-concepts/images/60-Blobs-two.png
new file mode 100644
index 00000000..82a6ae47
Binary files /dev/null and b/docs/source/color_processing/color-blob-concepts/images/60-Blobs-two.png differ
diff --git a/docs/source/color_processing/color-blob-concepts/images/70-Blobs-many.png b/docs/source/color_processing/color-blob-concepts/images/70-Blobs-many.png
new file mode 100644
index 00000000..930d6f3a
Binary files /dev/null and b/docs/source/color_processing/color-blob-concepts/images/70-Blobs-many.png differ
diff --git a/docs/source/color_processing/color-locator-challenge/color-locator-challenge.rst b/docs/source/color_processing/color-locator-challenge/color-locator-challenge.rst
new file mode 100644
index 00000000..1d2f40b7
--- /dev/null
+++ b/docs/source/color_processing/color-locator-challenge/color-locator-challenge.rst
@@ -0,0 +1,572 @@
+Color Locator (Challenge)
+=========================
+
+Overview
+--------
+
+This **Challenge** page introduces Color Locator settings that were **not mentioned in the Sample OpMode**. It assumes you have already followed this tutorial's previous pages:
+
+
+* :doc:`Discover <../color-locator-discover/color-locator-discover>` page, to open and test the Sample OpMode
+* :doc:`Explore <../color-locator-explore/color-locator-explore>` page, to edit settings mentioned in the Sample OpMode
+
+Here are the additional ColorLocator settings covered in this page:
+
+* pre-filtering, affecting Blob results and previews
+* pre-sorting
+* custom outline colors in the previews: ROI, contour, boxFit
+* access a boxFit's corner points
+* access all vertices of a contour
+* access a boxFit's size and tilt angle
+* create a horizontal rectangle around a tilted boxFit
+* access the horizontal rectangle's size and location
+
+Pre-Filter Intro
+----------------
+
+Here the term "pre-" means the filter criteria are implemented **before** the
+Blob formation results are passed further. Thus the DS and RC previews will
+**not display** any filtered-out contour or its boxFit. This can save CPU
+resources used to draw the outlines.
+
+Likewise the resulting list of results will not include any filtered-out Blobs.
+A shorter list can help OpMode cycle time.
+
+This contrasts with the post-filtering present in the Sample OpMode and
+discussed at this tutorial's Explore page. Pre-filtering is a setting that
+persists, while post-filtering is a one-time action performed on a single set
+of processing results.
+
+Teams may wish to use both. Use a pre-filter to "clean up" the preview, which
+can appear chaotic with potentially dozens of Blobs. Then use a post-filter
+to focus on the particular boxFit results of interest.
+
+Caution: changing the ROI size and/or changing the camera resolution may
+require an adjustment to filtering by Area.
+
+Pre-Filter Programming
+----------------------
+
+To apply a filter setting, use two steps:
+
+
+* **set** the filter name and criteria
+* **add** that filter to the existing Processor
+
+The "Color Blob Locator" Processor must already be created; adding a filter is
+**not** part of the Builder pattern here. A pre-filter can be added before or
+after the VisionPortal is built.
+
+In general, a pre-filter setting remains in place and cannot be edited. To
+"change" a pre-filter, it must be **removed** from the Processor, then
+**added** again.
+
+Recall from this tutorial's :doc:`Explore
+<../color-locator-explore/color-locator-explore>` page that filtering can be
+done by Contour Area, Blob Density and boxFit Aspect Ratio.
+
+Multiple pre-filters can operate at the same time. A single common filter name
+**could** be used, if its criteria are defined, then added -- then redefined,
+and added again, etc.
+
+You might find it more versatile and convenient to create unique filter names,
+each separately managed (i.e. set criteria, add, remove, add again).
+
+Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ These pre-filter Blocks are in the Vision/ColorBlobLocator/Pre-processing toolbox:
+
+ .. figure:: images/10-set-pre-filter.png
+ :width: 75%
+ :align: center
+ :alt: Setting a pre-filter
+
+ Setting a pre-filter
+
+ The Blocks in the toolbox use the same variable name ``myFilter`` (green
+ arrow) for the three available criteria (orange ovals). As noted above,
+ be careful about using a single name for different pre-filters.
+
+ For multiple pre-filters, you might prefer unique names; see the orange arrows:
+
+ .. figure:: images/12-set-3-filters.png
+ :width: 75%
+ :align: center
+ :alt: Setting 3 filters
+
+ Setting 3 filters
+
+ These Blocks for adding and removing filters are in the
+ Vision/ColorBlobLocator/Processor toolbox:
+
+ .. figure:: images/15-add-filter.png
+ :width: 75%
+ :align: center
+ :alt: Adding and Removing a filter
+
+ Adding and Removing a filter
+
+ Be careful to designate the correct filter to be added or removed; see
+ the green arrows.
+
+ .. tab-item:: Java
+ :sync: java
+
+ For multiple pre-filters, you might prefer unique names:
+
+ .. code-block:: java
+
+ myAreaFilter = new BlobFilter(BlobCriteria.BY_CONTOUR_AREA, 100, 20000);
+ myDensityFilter = new BlobFilter(BlobCriteria.BY_DENSITY, 0.5, 1.0);
+ myRatioFilter = new BlobFilter(BlobCriteria.BY_ASPECT_RATIO, 1.0, 10.0);
+
+ After defining a filter's criteria, add the filter to an already-existing
+ Processor:
+
+ .. code-block:: java
+
+ colorLocator.addFilter(myAreaFilter);
+ colorLocator.addFilter(myDensityFilter);
+ colorLocator.addFilter(myRatioFilter);
+
+ These methods can remove one or all filters from a Processor:
+
+ .. code-block:: java
+
+ colorLocator.removeFilter(myAreaFilter);
+ colorLocator.removeFilter(myDensityFilter);
+ colorLocator.removeFilter(myRatioFilter);
+ colorLocator.removeAllFilters();
+
+ After removal, a filter can be re-added to the Processor.
+
+Pre-sort
+--------
+
+Here the term "pre-" also means the sort criteria are already established, and
+applied to the results of the Blob formation process.
+
+This works the same as the post-sorting mentioned in the Sample OpMode and
+discussed at this tutorial's Explore page. Pre-sorting is a setting that
+persists, while post-sorting is a one-time action performed on a single set of
+processing results.
+
+Only one sort (the last one applied) affects the final list of results provided
+for the OpMode to evaluate. Thus there is no benefit to using both pre-sort
+and post-sort.
+
+To apply a sort, use two steps:
+
+* define the sort name and criteria
+* apply that sort to the existing Processor
+
+The "Color Blob Locator" Processor must already be created; adding a sort is
+**not** part of the Builder pattern here. A pre-sort can be added before or
+after the VisionPortal is built.
+
+In general, a pre-sort setting remains in place and cannot be removed or
+edited. To "change" a sort, simply define and apply another one, with the same
+or a unique name. The later ``setSort()`` will be in effect.
+
+Reminder from this tutorial's :doc:`Explore
+<../color-locator-explore/color-locator-explore>` page: by default, the Sample
+OpMode sorts by **Contour Area** in descending order (largest is first). This
+is an internally programmed sort, not appearing in the Sample OpMode. This
+default is overridden or replaced by any pre-sort or post-sort specified in the
+OpMode.
+
+Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ These pre-sort Blocks are in the Vision/ColorBlobLocator/Pre-processing
+ toolbox:
+
+ .. figure:: images/20-sort-criteria.png
+ :width: 75%
+ :align: center
+ :alt: Selecting the Sort Criteria
+
+ Selecting the Sort Criteria
+
+ This Block for applying the named pre-sort is in the
+ Vision/ColorBlobLocator/Processor toolbox:
+
+ .. figure:: images/23-set-sort.png
+ :width: 75%
+ :align: center
+ :alt: Setting the Sort Criteria
+
+ Setting the Sort Criteria
+
+ .. tab-item:: Java
+ :sync: java
+
+ A generic pre-sort name works well, since only one sort can be in effect
+ at a time:
+
+ .. code-block:: java
+
+ mySort = new BlobSort(BlobCriteria.BY_CONTOUR_AREA, SortOrder.ASCENDING);
+
+ If you are experimenting with different pre-sort criteria, you might
+ consider unique names:
+
+ .. code-block:: java
+
+ myAreaSort = new BlobSort(BlobCriteria.BY_CONTOUR_AREA, SortOrder.ASCENDING);
+ myDensitySort = new BlobSort(BlobCriteria.BY_DENSITY, SortOrder.ASCENDING);
+ myRatioSort = new BlobSort(BlobCriteria.BY_ASPECT_RATIO, SortOrder.ASCENDING);
+
+ After defining a sort's criteria, apply the pre-sort to an
+ already-existing Processor:
+
+ .. code-block:: java
+
+ colorLocator.setSort(mySort);
+
+Preview Colors
+--------------
+
+You can specify custom colors for the preview outlines of:
+
+* Region of Interest (ROI)
+* Blob contour
+* boxFit rectangle
+
+Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ .. figure:: images/30-outline-colors.png
+ :width: 75%
+ :align: center
+ :alt: Setting Custom outline colors
+
+ Setting custom outline colors
+
+ .. tab-item:: Java
+ :sync: java
+
+ Import the ``Color`` class if needed, then add any of the ``.set...``
+ methods to the Processor Builder pattern:
+
+ .. code-block:: java
+
+ import android.graphics.Color;
+ .
+ .
+ .setBoxFitColor(Color.rgb(255, 120, 31))
+ .setRoiColor(Color.rgb(255, 255, 255))
+ .setContourColor(Color.rgb(3, 227, 252))
+
+ Use your own custom values, only from the RGB Color Space. See the
+ separate tutorial page called **Color Spaces**.
+
+boxFit Corners
+--------------
+
+An OpMode can access the four corner points of a boxFit rectangle.
+
+Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ This Blocks Function retrieves, stores and displays the 4 corner points
+ of the instant boxFit being processed by the OpMode:
+
+ .. figure:: images/40-corners.png
+ :width: 75%
+ :align: center
+ :alt: Displaying Corner Points via Telemetry
+
+ Displaying Corner Points via Telemetry
+
+ The ``.points`` and ``Point.x`` and ``Point.y`` Blocks are in the
+ "Vision/ColorBlobLocator/Blob data" toolbox.
+
+ The Function uses its own **For Loop** to cycle through the ``myPoints``
+ List of 4 points, clockwise from top left corner.
+
+ This Function operates inside the Sample OpMode's **For Loop** of all
+ Blob results. The instant ``myBoxFit`` is the one being processed,
+ returned from the preceding ``.BoxFit`` Block.
+
+ .. tab-item:: Java
+ :sync: java
+
+ This Java code retrieves, stores and displays the 4 corner points of the
+ instant boxFit being processed by the OpMode:
+
+ .. code-block:: java
+
+ // Display boxFit.points(), an array of the box's four (X, Y) corner points,
+ // clockwise from top left corner.
+ Point[] myBoxCorners = new Point[4];
+ boxFit.points(myBoxCorners);
+ // this points() method does not return values, it populates the argument
+ for (int i = 0; i < 4; i++)
+ {
+ telemetry.addLine(String.format("boxFit corner %d (%d,%d)",
+ i, (int) myBoxCorners[i].x, (int) myBoxCorners[i].y));
+ }
+
+ This code operates inside the Sample OpMode's **For Loop** of all Blob
+ results. The instant ``boxFit`` is the one being processed, returned
+ from the preceding ``getBoxFit()`` method.
+
+Contour Vertices
+----------------
+
+Blob contours are irregular and hard to process in code; it's easier to work
+with boxFit rectangles.
+
+But the contour's outer points can be accessed by an OpMode. The result is a
+list of (X, Y) coordinates, with origin at the top left corner of the camera's
+image. X increases to the right, Y increases downward.
+
+Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ The following Blocks Function retrieves, stores and displays a List of
+ all the vertex Points of the contour of the instant Blob being processed
+ by the OpMode.
+
+ The List can be as short as 4 values, or dozens of values for jagged
+ contours.
+
+ Note that the ``.points`` Block (used above for boxFit corners) retrieves
+ and stores the List within the same Block. This ``.ContourPoints`` Block
+ only retrieves the List, to be assigned to a separate variable Block.
+
+ .. figure:: images/50-contour.png
+ :width: 75%
+ :align: center
+ :alt: Getting list of contour points
+
+ Getting the list of contour points
+
+ The ``.ContourPoints`` and ``Point.x`` and ``Point.y`` Blocks are in the
+ "Vision/ColorBlobLocator/Blob data" toolbox.
+
+ The Function uses its own **For Loop** to cycle through the
+ ``myContourPoints`` List, of undetermined length (could be very long).
+
+ This Function operates inside the Sample OpMode's **For Loop** of all
+ Blob results. The instant ``myBlob`` is the one being processed by that
+ outer For Loop.
+
+ .. tab-item:: Java
+ :sync: java
+
+ This Java code retrieves, stores and displays a List of all the vertex
+ Points of the contour of the instant Blob being processed by the OpMode.
+
+ .. code-block:: java
+
+ import org.opencv.core.Point;
+ .
+ .
+ // Display getContourPoints(), an array of the contour's many (X, Y) vertices
+ Point[] myContourPoints;
+ myContourPoints = b.getContourPoints();
+ int j = 0;
+ for(Point thisContourPoint : myContourPoints)
+ {
+ telemetry.addLine(String.format("contour vertex %d (%d,%d)",
+ j, (int) thisContourPoint.x, (int) thisContourPoint.y));
+ j += 1;
+ }
+
+ This Function operates inside the Sample OpMode's **For Loop** of all
+ Blob results. The instant Blob ``b`` is the one being processed by that
+ outer For Loop.
+
+ Not covered here is one feature available in Java only:
+
+ .. code-block:: java
+
+            MatOfPoint myContour = b.getContour();
+
+ This method returns a matrix unique to the OpenCV library. The matrix
+ object can convert itself to an array, as follows:
+
+ .. code-block:: java
+
+ MatOfPoint myContour;
+ Point[] myContourPoints;
+ myContour = b.getContour();
+ myContourPoints = myContour.toArray();
+
+ This code seems to give the same set of points as ``getContourPoints()``
+ shown above.
+
+boxFit Size and Angle
+---------------------
+
+These simple fields were not demonstrated in the Sample OpMode.
+
+The boxFit size variable contains two fields which must be accessed
+individually, as shown below.
+
+If the boxFit is horizontal (parallel to the ROI), its angle might be 0 or 90
+degrees, often jumping between the two values. At 90 degrees, height and width
+become switched. Your OpMode code needs to account for this scenario.
+
+Likewise, the boxFit angle is sometimes reported as clockwise from vertical,
+rather than counterclockwise from horizontal. More discussion of this angle
+convention can be found in the OpenCV documentation for ``RotatedRect``.
+
+Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ These Blocks are in the "Vision/ColorBlobLocator/Blob data" toolbox.
+
+ .. figure:: images/60-box-size.png
+ :width: 75%
+ :align: center
+ :alt: boxFit properties
+
+ boxFit properties
+
+ .. tab-item:: Java
+ :sync: java
+
+ Here's a modified version of the Sample OpMode's telemetry code, to
+ display only the size and angle of the instant boxFit being processed.
+
+ .. code-block:: java
+
+ org.opencv.core.Size myBoxFitSize;
+ for(ColorBlobLocatorProcessor.Blob b : blobs)
+ {
+ RotatedRect boxFit = b.getBoxFit();
+ myBoxFitSize = boxFit.size;
+ telemetry.addData("width", myBoxFitSize.width);
+ telemetry.addData("height", myBoxFitSize.height);
+                telemetry.addData("angle", boxFit.angle);
+ }
+
+ The Java class ``Size`` here is different than another class of the same
+ simple name. OnBot Java and Android Studio do not allow imports of
+ identical simple classnames.
+
+        In fact OnBot Java will not allow the import of this version, even if the
+        other version (``android.util.Size``) is not used in the OpMode.
+ Instead, declare the variable with the full classname, as shown in the
+ first line above.
+
+Horizontal Rectangle
+--------------------
+
+You might prefer to process only **horizontal** best-fit rectangles, parallel
+to the ROI, not tilted.
+
+OpenCV can generate a best-fit rectangle for a boxFit, whether tilted or not.
+This is **not** a "forced horizontal" boxFit, rotated in place. The new
+horizontal rectangle simply touches and encloses the outer corners of the
+boxFit.
+
+If the boxFit is tilted, the new horizontal rectangle will be larger. If the
+boxFit already had an angle of 0 (or 90) degrees, the new rectangle will be
+identical.
+
+In Blocks and Java, the command ``boundingRect()`` accepts a boxFit of type
+``RotatedRect`` and returns a horizontal rectangle of type ``Rect``. The new
+rectangle is not drawn or depicted in the preview.
+
+Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ These Blocks are in the "Vision/ColorBlobLocator/Blob data" toolbox.
+
+ .. figure:: images/70-horizontal-box.png
+ :width: 75%
+ :align: center
+ :alt: Defining the horizontal box
+
+ Defining the horizontal box
+
+ The (X, Y) values are the top left corner of the new horizontal
+ rectangle, in the full image reference frame.
+
+ .. tab-item:: Java
+ :sync: java
+
+ Here's a modified version of the Sample OpMode's telemetry code, to
+ display only the **top left corner** and **size** of the horizontal
+ rectangle around the boxFit being processed.
+
+ .. code-block:: java
+
+ import org.opencv.core.Rect;
+ .
+ .
+ for(ColorBlobLocatorProcessor.Blob b : blobs)
+ {
+ RotatedRect boxFit = b.getBoxFit();
+ Rect myHorizontalBoxFit = boxFit.boundingRect();
+ telemetry.addData("top left X", myHorizontalBoxFit.x);
+ telemetry.addData("top left Y", myHorizontalBoxFit.y);
+ telemetry.addData("width", myHorizontalBoxFit.width);
+ telemetry.addData("height", myHorizontalBoxFit.height);
+ }
+
+ In this case, OnBot Java and Android Studio found no conflicts with the
+ import of class ``Rect``.
+
+ Pay attention to classes and fields:
+
+ * boxFit is of Java type ``RotatedRect``, even though it's not usually rotated
+ * the new method ``boundingRect()`` returns an object of type ``Rect``
+ * the ``Size`` and ``Rect`` classes both have fields named ``height`` and ``width``
+
+Advanced Development
+--------------------
+
+Searching for multiple colors is possible by building **multiple processors**
+and adding them to the same VisionPortal. This allows different ROIs, for
+example, that can overlap if desired.
+
+.. figure:: images/80-two-process.png
+ :width: 75%
+ :align: center
+ :alt: Using two processors
+
+ Using two processors
+
+This ends the tutorial's 3 pages on ColorLocator:
+
+* :doc:`Discover <../color-locator-discover/color-locator-discover>`,
+* :doc:`Explore <../color-locator-explore/color-locator-explore>`,
+* **Challenge**
+
+The final page of this tutorial provides optional info on :doc:`Color Spaces
+<../color-spaces/color-spaces>`.
+
+Best of luck as you apply these tools to your Autonomous and TeleOp OpModes!
+
+============
+
+*Questions, comments and corrections to westsiderobotics@verizon.net*
+
diff --git a/docs/source/color_processing/color-locator-challenge/images/10-set-pre-filter.png b/docs/source/color_processing/color-locator-challenge/images/10-set-pre-filter.png
new file mode 100644
index 00000000..8ac8b502
Binary files /dev/null and b/docs/source/color_processing/color-locator-challenge/images/10-set-pre-filter.png differ
diff --git a/docs/source/color_processing/color-locator-challenge/images/12-set-3-filters.png b/docs/source/color_processing/color-locator-challenge/images/12-set-3-filters.png
new file mode 100644
index 00000000..af5166ff
Binary files /dev/null and b/docs/source/color_processing/color-locator-challenge/images/12-set-3-filters.png differ
diff --git a/docs/source/color_processing/color-locator-challenge/images/15-add-filter.png b/docs/source/color_processing/color-locator-challenge/images/15-add-filter.png
new file mode 100644
index 00000000..f3c7c5a8
Binary files /dev/null and b/docs/source/color_processing/color-locator-challenge/images/15-add-filter.png differ
diff --git a/docs/source/color_processing/color-locator-challenge/images/20-sort-criteria.png b/docs/source/color_processing/color-locator-challenge/images/20-sort-criteria.png
new file mode 100644
index 00000000..8bdae9d6
Binary files /dev/null and b/docs/source/color_processing/color-locator-challenge/images/20-sort-criteria.png differ
diff --git a/docs/source/color_processing/color-locator-challenge/images/23-set-sort.png b/docs/source/color_processing/color-locator-challenge/images/23-set-sort.png
new file mode 100644
index 00000000..b5ef74ca
Binary files /dev/null and b/docs/source/color_processing/color-locator-challenge/images/23-set-sort.png differ
diff --git a/docs/source/color_processing/color-locator-challenge/images/30-outline-colors.png b/docs/source/color_processing/color-locator-challenge/images/30-outline-colors.png
new file mode 100644
index 00000000..2f9e0231
Binary files /dev/null and b/docs/source/color_processing/color-locator-challenge/images/30-outline-colors.png differ
diff --git a/docs/source/color_processing/color-locator-challenge/images/40-corners.png b/docs/source/color_processing/color-locator-challenge/images/40-corners.png
new file mode 100644
index 00000000..1f2c4878
Binary files /dev/null and b/docs/source/color_processing/color-locator-challenge/images/40-corners.png differ
diff --git a/docs/source/color_processing/color-locator-challenge/images/50-contour.png b/docs/source/color_processing/color-locator-challenge/images/50-contour.png
new file mode 100644
index 00000000..d3b9c842
Binary files /dev/null and b/docs/source/color_processing/color-locator-challenge/images/50-contour.png differ
diff --git a/docs/source/color_processing/color-locator-challenge/images/60-box-size.png b/docs/source/color_processing/color-locator-challenge/images/60-box-size.png
new file mode 100644
index 00000000..252635a8
Binary files /dev/null and b/docs/source/color_processing/color-locator-challenge/images/60-box-size.png differ
diff --git a/docs/source/color_processing/color-locator-challenge/images/70-horizontal-box.png b/docs/source/color_processing/color-locator-challenge/images/70-horizontal-box.png
new file mode 100644
index 00000000..51f114ab
Binary files /dev/null and b/docs/source/color_processing/color-locator-challenge/images/70-horizontal-box.png differ
diff --git a/docs/source/color_processing/color-locator-challenge/images/80-two-process.png b/docs/source/color_processing/color-locator-challenge/images/80-two-process.png
new file mode 100644
index 00000000..6c8b7994
Binary files /dev/null and b/docs/source/color_processing/color-locator-challenge/images/80-two-process.png differ
diff --git a/docs/source/color_processing/color-locator-discover/color-locator-discover.rst b/docs/source/color_processing/color-locator-discover/color-locator-discover.rst
new file mode 100644
index 00000000..f56eba9c
--- /dev/null
+++ b/docs/source/color_processing/color-locator-discover/color-locator-discover.rst
@@ -0,0 +1,331 @@
+Color Locator (Discover)
+========================
+
+Overview
+--------
+
+Another way to use FTC's new OpenCV vision tools is to operate a "Color
+Locator". Namely, it can **find a target color that you specify**.
+
+As with the Color Sensor tool, you can specify a **Region of Interest** (ROI).
+Only that zone of the camera's view will be searched for the target color.
+
+The "target color" is actually a **range** of numerical color values, for a
+better chance of finding the desired color.
+
+OpenCV will form **"Blobs"** of that color. As described in the Concepts page
+here, a Blob is a contiguous cluster of similar-colored pixels.
+
+Blobs often have a complex, irregular shape or **contour**, so they are
+represented here by a best-fit rectangle called **"boxFit"**.
+
+.. figure:: images/20-blob-zoom.png
+ :width: 50%
+ :align: center
+ :alt: Zoomed Color Blob detection
+
+ Zoomed Color Blob detection
+
+The target color here is BLUE. The white rectangle is the Region of Interest
+(ROI), the teal jagged line is the Blob's contour (fully inside the ROI), and
+the purple rectangle is the boxFit.
+
+The software reports the size, position and orientation of each "boxFit". That
+data can be evaluated by your OpMode for **robot navigation** and other
+actions.
+
+The following sections describe how to do this, with a Sample OpMode.
+
+Configuration
+-------------
+
+*Skip this section if ...*
+
+* *the active robot configuration already contains "Webcam 1",* or
+* *using the built-in camera of an Android phone as Robot Controller.*
+
+Before starting the programming, REV Control Hub users should make a robot
+configuration that includes the USB webcam to be used as a color locator.
+
+For now, use the default webcam name, "Webcam 1". If a different name is
+preferred, edit the Sample OpMode to agree with the exact webcam name in the
+robot configuration.
+
+**Save and activate** that configuration; its name should appear on the paired
+Driver Station screen.
+
+Sample OpMode
+-------------
+
+Opening the Sample OpMode
++++++++++++++++++++++++++
+
+To learn about opening the Sample OpMode, select and read the Blocks **or**
+Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ 1. On a laptop or desktop computer connected via Wi-Fi to the Robot
+ Controller, open the Chrome browser. Go to the REV Control
+ Hub's address http://192.168.43.1:8080 (or
+ http://192.168.49.1:8080 for Android RC phone) and click the
+ Blocks tab.
+
+ 2. Click ``Create New OpMode``\ , enter a new name such as
+ "ColorLocator_Monica_v01", and select the Sample OpMode
+ ``ConceptVisionColorLocator``.
+
+ 3. At the top of the Blocks screen, you can change the type from "TeleOp"
+ to "Autonomous", since this Sample OpMode does not use gamepads.
+
+ 4. If using the built-in camera of an RC phone, drag out the relevant
+ Block from the left-side ``VisionPortal.Builder`` toolbox.
+
+ 5. Save the OpMode, time to try it!
+
+ .. tab-item:: Java
+ :sync: java
+
+ 1. Open your choice of OnBot Java or Android Studio.
+
+ 2. In the ``teamcode`` folder, add/create a new OpMode with a name such
+ as "ColorLocator_Javier_v01.java", and select the Sample OpMode
+ ``ConceptVisionColorLocator.java``.
+
+ 3. At about Line 63, you can change ``@TeleOp`` to ``@Autonomous``\ ,
+ since this Sample OpMode does not use gamepads.
+
+ 4. If using the built-in camera of an RC phone, follow the OpMode
+ comments to specify that camera.
+
+ 5. Click "Build", time to try it!
+
+Running the Sample OpMode
++++++++++++++++++++++++++
+
+1. On the Driver Station, select the Autonomous OpMode that you just saved or
+ built.
+
+2. Turn off the automatic 30-second match timer (green slider). Aim the camera
+ at a **blue object**.
+
+3. Touch INIT only. The OpMode should give Telemetry showing the results of
+ one or more Blobs:
+
+.. figure:: images/23-basic-telemetry.png
+ :width: 75%
+ :align: center
+ :alt: Basic Telemetry
+
+ Basic Telemetry
+
+In this example, the Region of Interest (ROI) contains only one Blob of the
+default target color BLUE.
+
+Move the camera around, especially at BLUE objects, and watch the Telemetry
+area on the Driver Station screen. It may sometimes show more lines of Blob
+data, and sometimes show no Blob data at all.
+
+It's working! Your camera is working as a **color locator**. Think about how
+to use this in the FTC Robot Game.
+
+*Skip the next two sections, if you already know how to use FTC previews.*
+
+DS Preview
+----------
+
+Before describing the telemetry data, this page offers two sections showing how
+to view the OpenCV results with **previews**. Previewing is essential for
+working with vision code.
+
+On the Driver Station (DS), remain in INIT -- don't touch the Start button.
+
+At the top right corner, touch the 3-dots menu, then ``Camera Stream``. This
+shows the camera's view; tap the image to refresh it.
+
+.. figure:: images/30-CameraStream.png
+ :align: center
+ :width: 75%
+ :alt: DS Camera Stream Preview
+
+ DS Camera Stream Preview
+
+The default target color here is BLUE. The white rectangle is the Region of
+Interest (ROI), the teal (light blue) jagged line is the Blob's contour (fully
+inside the ROI), and the orange rectangle is the boxFit.
+
+For a BIG preview, touch the arrows at the bottom right corner.
+
+Or, select Camera Stream again, to return to the previous screen and its
+Telemetry.
+
+RC Preview
+----------
+
+The Robot Controller (RC) device also makes a preview, called ``LiveView``.
+This is full video, and is shown automatically on the screen of an RC phone.
+
+.. figure:: images/32-LiveView.png
+ :align: center
+ :width: 75%
+ :alt: Image of a LiveView stream
+
+ LiveView stream
+
+The above preview is from a REV Control Hub.
+
+It has no physical screen, so you must plug in an HDMI monitor **or** use
+open-source `scrcpy <https://github.com/Genymobile/scrcpy>`_ (called
+"screen copy") to see the preview on a laptop or computer that's connected via
+Wi-Fi to the Control Hub.
+
+Basic Telemetry Data
+--------------------
+
+Let's look closer at the DS telemetry:
+
+.. figure:: images/35-basic-telemetry-circled.png
+ :align: center
+ :width: 75%
+ :alt: Image of telemetry output
+
+ Locator Telemetry
+
+In this example, the Region of Interest (ROI) contains only one Blob of the
+default target color BLUE. You could probably move your camera to achieve the
+same result - with the help of previews.
+
+The **first column** shows the **Area**, in pixels, of the Blob (contour, not
+boxFit). By default, the Sample OpMode uses a **filter** to show Blobs between
+50 and 20,000 pixels. Also by default, the Sample uses a **sort** tool to
+display multiple Blobs in descending order of Area (largest is first).
+
+The **second column** shows the **Density** of the Blob contour. From the
+Sample comments:
+
+..
+
+ *A blob's density is an indication of how "full" the contour is. If you put
+ a rubber band around the contour you would get the "Convex Hull" of the
+ contour. The density is the ratio of Contour-area to Convex Hull-area.*
+
+
+The **third column** shows the **Aspect Ratio of the boxFit**, the best-fit
+rectangle around the contour:
+
+..
+
+ *A blob's Aspect Ratio is the ratio of boxFit long side to short side. A
+ perfect Square has an Aspect Ratio of 1. All others are > 1.*
+
+
+.. tip::
+ The boxFit is not required to stay inside the ROI. Also the boxFit may be
+ **tilted** at some angle, namely not horizontal. This will be discussed
+ more in a later page.
+
+The **fourth column** shows the (X, Y) position of the **Center** of the boxFit
+rectangle. With the origin at the full image's top left corner, X increases to
+the right and Y increases downward.
+
+Blob Formation
+--------------
+
+So far these examples have shown a **single Blob** formed by OpenCV:
+
+.. figure:: images/40-single-blob.png
+ :align: center
+ :width: 50%
+ :alt: Boxfit and detection within ROI
+
+ Single blob discovery
+
+But OpenCV can form and return **multiple Blobs** in a single set of results:
+
+.. figure:: images/43-two-blobs.png
+ :align: center
+ :width: 50%
+ :alt: Two blobs detected
+
+ Two blob discovery
+
+Without controls, OpenCV can easily form a **high number of Blobs** (at least
+12 here):
+
+.. figure:: images/45-many-blobs.png
+ :align: center
+ :width: 50%
+ :alt: Multiple blob discovery
+
+ Multiple blob discovery
+
+And as mentioned above, some of those Blobs might have a **boxFit tilted** at
+some angle:
+
+.. figure:: images/49-tilted-box.png
+ :align: center
+ :width: 50%
+ :alt: Tilted Boxfit
+
+ Tilted Boxfit
+
+This tutorial's **next two pages** show how to manage these scenarios by
+**editing the OpMode's default settings**, and **accessing more OpenCV
+features** not covered in the Sample OpMode.
+
+Using boxFit Data for Position
+------------------------------
+
+A team's Autonomous code can evaluate boxFit data to navigate or guide the
+robot on the field.
+
+Imagine your camera is on the robot, looking forward. **Underneath the
+camera** is your **intake mechanism**, perhaps a top grabber, sideways claw
+or spinner.
+
+.. figure:: images/80-targeting.png
+ :align: center
+ :width: 50%
+ :alt: Game piece targeting
+
+ Game piece targeting
+
+OpenCV will report the data for this orange boxFit. Could your code use this
+data to **position the robot** directly in front of the game piece, for a
+better chance to collect it?
+
+How would you do it?
+
+Using boxFit Data for Manipulation
+----------------------------------
+
+For advanced teams: imagine your webcam is on a grabber arm, looking down into
+the Submersible (from INTO THE DEEP).
+
+.. figure:: images/82-targeting.png
+ :align: center
+ :width: 50%
+ :alt: Targeting in clutter
+
+ Targeting in clutter
+
+Could the data from this boxFit (orange rectangle) help you **grab only the
+Blue Sample**\ ?
+
+Could this help in Autonomous **and** TeleOp?
+
+More Documentation
+------------------
+
+This tutorial's next page called :doc:`Explore <../color-locator-explore/color-locator-explore>` covers
+**editing the OpMode's existing default settings**.
+
+After that, the following page called :doc:`Challenge
+<../color-locator-challenge/color-locator-challenge>` shows how to **access
+more OpenCV features** not covered in the Sample OpMode.
+
+============
+
+*Questions, comments and corrections to westsiderobotics@verizon.net*
diff --git a/docs/source/color_processing/color-locator-discover/images/20-blob-zoom.png b/docs/source/color_processing/color-locator-discover/images/20-blob-zoom.png
new file mode 100644
index 00000000..88b26576
Binary files /dev/null and b/docs/source/color_processing/color-locator-discover/images/20-blob-zoom.png differ
diff --git a/docs/source/color_processing/color-locator-discover/images/23-basic-telemetry.png b/docs/source/color_processing/color-locator-discover/images/23-basic-telemetry.png
new file mode 100644
index 00000000..453b8de0
Binary files /dev/null and b/docs/source/color_processing/color-locator-discover/images/23-basic-telemetry.png differ
diff --git a/docs/source/color_processing/color-locator-discover/images/30-CameraStream.png b/docs/source/color_processing/color-locator-discover/images/30-CameraStream.png
new file mode 100644
index 00000000..c0081dad
Binary files /dev/null and b/docs/source/color_processing/color-locator-discover/images/30-CameraStream.png differ
diff --git a/docs/source/color_processing/color-locator-discover/images/32-LiveView.png b/docs/source/color_processing/color-locator-discover/images/32-LiveView.png
new file mode 100644
index 00000000..cbd731d5
Binary files /dev/null and b/docs/source/color_processing/color-locator-discover/images/32-LiveView.png differ
diff --git a/docs/source/color_processing/color-locator-discover/images/35-basic-telemetry-circled.png b/docs/source/color_processing/color-locator-discover/images/35-basic-telemetry-circled.png
new file mode 100644
index 00000000..953124a6
Binary files /dev/null and b/docs/source/color_processing/color-locator-discover/images/35-basic-telemetry-circled.png differ
diff --git a/docs/source/color_processing/color-locator-discover/images/40-single-blob.png b/docs/source/color_processing/color-locator-discover/images/40-single-blob.png
new file mode 100644
index 00000000..c0bd4102
Binary files /dev/null and b/docs/source/color_processing/color-locator-discover/images/40-single-blob.png differ
diff --git a/docs/source/color_processing/color-locator-discover/images/43-two-blobs.png b/docs/source/color_processing/color-locator-discover/images/43-two-blobs.png
new file mode 100644
index 00000000..82a6ae47
Binary files /dev/null and b/docs/source/color_processing/color-locator-discover/images/43-two-blobs.png differ
diff --git a/docs/source/color_processing/color-locator-discover/images/45-many-blobs.png b/docs/source/color_processing/color-locator-discover/images/45-many-blobs.png
new file mode 100644
index 00000000..930d6f3a
Binary files /dev/null and b/docs/source/color_processing/color-locator-discover/images/45-many-blobs.png differ
diff --git a/docs/source/color_processing/color-locator-discover/images/49-tilted-box.png b/docs/source/color_processing/color-locator-discover/images/49-tilted-box.png
new file mode 100644
index 00000000..9c9b070d
Binary files /dev/null and b/docs/source/color_processing/color-locator-discover/images/49-tilted-box.png differ
diff --git a/docs/source/color_processing/color-locator-discover/images/80-targeting.png b/docs/source/color_processing/color-locator-discover/images/80-targeting.png
new file mode 100644
index 00000000..9b9715cb
Binary files /dev/null and b/docs/source/color_processing/color-locator-discover/images/80-targeting.png differ
diff --git a/docs/source/color_processing/color-locator-discover/images/82-targeting.png b/docs/source/color_processing/color-locator-discover/images/82-targeting.png
new file mode 100644
index 00000000..94f6568d
Binary files /dev/null and b/docs/source/color_processing/color-locator-discover/images/82-targeting.png differ
diff --git a/docs/source/color_processing/color-locator-explore/color-locator-explore.rst b/docs/source/color_processing/color-locator-explore/color-locator-explore.rst
new file mode 100644
index 00000000..0cd5b300
--- /dev/null
+++ b/docs/source/color_processing/color-locator-explore/color-locator-explore.rst
@@ -0,0 +1,502 @@
+Color Locator (Explore)
+==========================
+
+Overview
+--------
+
+This **Explore** page shows how to modify the default settings of the
+**ColorLocator** Sample OpMode. It assumes you have already followed this
+tutorial's previous :doc:`Discover
+<../color-locator-discover/color-locator-discover>` page, to open and test this
+OpMode.
+
+ColorLocator has only two required **inputs**\ :
+
+* target color range
+* Region of Interest (ROI)
+
+Both of these can be specified in multiple ways.
+
+This Explore page covers **settings** that are already in the Sample:
+
+* which contour types to process
+* whether to draw contours in the preview
+* pre-processing of the image
+* camera resolution
+* post-filter the Blob results
+* post-sort the Blob results
+
+Building the VisionPortal
+-------------------------
+
+The Sample OpMode first creates a "Color Blob Locator" **Processor** using the
+Java **Builder** pattern. This is the same Builder pattern used to create an
+AprilTag Processor, and previously a TensorFlow Processor.
+
+The Sample OpMode then creates a **VisionPortal**, again using a Builder
+pattern. This includes adding the "Color Blob Locator" Processor to the
+VisionPortal.
+
+The FTC VisionPortal was introduced in 2023. More information is available
+on the `ftc-docs VisionPortal Page
+<https://ftc-docs.firstinspires.org/en/latest/apriltag/vision_portal/visionportal-overview/visionportal-overview.html>`_.
+
+Target Color Range
+------------------
+
+The "target color" is actually a **range** of numerical color values, for a
+better chance of finding the desired color.
+
+Each **Swatch** name (BLUE, RED, YELLOW, GREEN) has been pre-programmed with a
+range of color values to detect most shades of that color, in most lighting
+conditions.
+
+The values for Red, Blue and Yellow were tuned for the plastic game pieces
+(called Samples) from INTO THE DEEP.
+
+Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ The sample OpMode uses this Builder Block to specify the target color Swatch:
+
+ .. figure:: images/10-setColorRangeSwatch.png
+ :width: 75%
+ :align: center
+ :alt: Setting the color swatch
+
+ Setting the Color Range Swatch
+
+ Use the drop-down list to select the Swatch of the desired target color
+ range.
+
+ As an alternate, this Block can be replaced with the following Block in
+ the Vision/ColorBlobLocator/Processor.Builder toolbox:
+
+ .. figure:: images/20-setColorRangeCustom.png
+ :width: 75%
+ :align: center
+ :alt: Setting custom color range
+
+ Setting a custom color range
+
+      First use the drop-down list (green arrow) to choose the **Color
+      Space**: YCrCb, HSV or RGB. Learn more about Color Spaces at the
+      separate page in this tutorial.
+
+ Then select the numerical values in that Color Space to define the range
+ of the target color.
+
+ The ``min`` and ``max`` fields relate to a corresponding pair of values,
+ namely (v0, v0), (v1, v1) or (v2, v2).
+
+ .. tab-item:: Java
+ :sync: java
+
+ In the Sample OpMode, follow the instructions at about lines 70-75 to set
+ the desired target color range at about line 110.
+
+ Use a predefined Swatch, or set a custom range in a specified Color Space
+ (YCrCb, HSV or RGB). Learn more about Color Spaces at the separate page
+ in this tutorial.
+
+ .. code-block:: java
+
+ import org.opencv.core.Scalar;
+ .
+ .
+ // use a predefined color match
+ .setTargetColorRange(ColorRange.BLUE)
+ // Available predefined colors are: RED, BLUE, YELLOW, GREEN
+ .
+ // or define your own color match
+ .setTargetColorRange(new ColorRange(ColorSpace.YCrCb,
+ new Scalar( 32, 176, 0),
+ new Scalar(255, 255, 132)))
+
+Region of Interest (ROI)
+------------------------
+
+The Blocks and Java Sample OpModes give this description:
+
+..
+
+ *Focus the color locator by defining a RegionOfInterest (ROI) which you want
+ to search. This can be the entire frame, or a sub-region defined using
+ standard image coordinates or a normalized +/- 1.0 coordinate system. Use
+ one form of the ImageRegion class to define the ROI.*
+
+
+Caution: changing the ROI size and/or changing the camera resolution may
+require an adjustment to filtering by Area. Post-filtering is covered here at
+this tutorial's **Explore** page, and pre-filtering is covered at the following
+:doc:`Challenge <../color-locator-challenge/color-locator-challenge>`
+page.
+
+Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ .. figure:: images/30-setROI.png
+ :width: 75%
+ :align: center
+ :alt: Setting ROI
+
+ Setting the ROI
+
+ .. tab-item:: Java
+ :sync: java
+
+ In the Sample OpMode, follow the instructions at about lines 77-83 to set
+ the desired ROI at about line 112.
+
+ .. code-block:: java
+
+ .setRoi(ImageRegion.entireFrame())
+ .
+ // 100x100 pixel square near the upper left corner
+ .setRoi(ImageRegion.asImageCoordinates(50, 50, 150, 150))
+ .
+ // 50% width/height square centered on screen
+ .setRoi(ImageRegion.asUnityCenterCoordinates(-0.5, 0.5, 0.5, -0.5))
+
+Choice of Contours
+------------------
+
+The Blocks and Java Sample OpModes give this description:
+
+..
+
+ *Define which contours are included. You can get ALL the contours, or you
+ can skip any contours that are completely inside another contour. note:
+ EXTERNAL_ONLY helps to avoid bright reflection spots from breaking up areas
+ of solid color.*
+
+
+Also, the display of contours (in the previews) can be turned ON or OFF:
+
+..
+
+ Turning this on helps debugging but takes up valuable CPU time.
+
+
+Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ .. figure:: images/40-contourChoices.png
+ :width: 75%
+ :align: center
+ :alt: Contour Choices
+
+ Contour Choices
+
+ .. tab-item:: Java
+ :sync: java
+
+ In the Sample OpMode, follow the instructions at about lines 85-92 to set
+ the desired contour mode and drawing setting at about lines 111 and 113,
+ respectively.
+
+ .. code-block:: java
+
+ // return all contours
+ .setContourMode(ColorBlobLocatorProcessor.ContourMode.ALL_FLATTENED_HIERARCHY)
+ .
+ // exclude contours inside other contours
+ .setContourMode(ColorBlobLocatorProcessor.ContourMode.EXTERNAL_ONLY)
+ .
+ // show contours in the DS and RC previews
+ .setDrawContours(true)
+
+Image Pre-Processing
+--------------------
+
+The default Sample OpMode purposely **blurs** the camera's image. This
+"pre-processing" happens **before** OpenCV performs Blob formation, thus
+affecting the contours seen in DS and RC previews.
+
+The effect is very small (default kernel size of 5x5 pixels), but can
+significantly improve Blob formation, giving more useful results.
+
+Blurring is one of three available image adjustments to improve processing
+results. You can experiment with these advanced tools, after studying their
+usage. See links at the section below called **More Documentation**.
+
+The Blocks and Java Sample OpModes give this description:
+
+..
+
+ Include any pre-processing of the image or mask before looking for Blobs.
+
+ There is some extra processing you can include to improve the formation of
+ blobs. Using these features requires an understanding of how they may
+ affect the final blobs. The "pixels" argument sets the NxN kernel size.
+
+ **Blurring** an image helps to provide a smooth color transition between
+ objects, and smoother contours. The higher the number of pixels, the more
+ blurred the image becomes. Note: Even "pixels" values will be incremented
+ to satisfy the "odd number" requirement. Blurring too much may hide smaller
+ features. A "pixels" size of 5 is good for a 320x240 image.
+
+ **Erosion** removes floating pixels and thin lines so that only substantive
+ objects remain. Erosion can grow holes inside regions, and also shrink
+ objects. A "pixels" value in the range of 2-4 is suitable for low res
+ images.
+
+ **Dilation** makes objects more visible by filling in small holes, making
+ lines appear thicker, and making filled shapes appear larger. Dilation is
+ useful for joining broken parts of an object, such as when removing noise
+ from an image. A "pixels" value in the range of 2-4 is suitable for low res
+ images.
+
+
+Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ .. figure:: images/50-pre-process.png
+ :width: 75%
+ :align: center
+ :alt: Pre-processor options
+
+ Pre-processor Options
+
+ .. tab-item:: Java
+ :sync: java
+
+ In the Sample OpMode, follow the instructions at about lines 94-107 to
+ set the desired pre-processing at about line 114.
+
+ .. code-block:: java
+
+ .setBlurSize(int pixels)
+ .setErodeSize(int pixels)
+ .setDilateSize(int pixels)
+
+Any of these pre-processing settings can be **disabled** by setting their pixel
+value to zero, or by removing the command.
+
+In the FTC processor, any specified erosion is performed **before** dilation.
+This removes specular noise, then returns the remaining blobs to a size similar
+to their original size. (This also will **not** be on the final.)
+
+Camera Resolution
+-----------------
+
+The Sample OpMode uses a default camera resolution of 320 x 240 pixels,
+supported by most webcams and Android phone cameras. You may edit this
+resolution, subject to a trade-off between:
+
+* computing performance, and
+* image detail, possibly needed beyond ColorLocator.
+
+Caution: changing the camera resolution and/or changing the ROI size may
+require an adjustment to filtering by Area. Post-filtering is covered here at
+this tutorial's **Explore** page, and pre-filtering is covered at the following
+:doc:`Challenge <../color-locator-challenge/color-locator-challenge>` page.
+
+The Blocks and Java Sample OpModes give this description:
+
+..
+
+ Set the desired video resolution. Since a high resolution will not improve
+ this process, choose a lower resolution that is supported by your camera.
+ This will improve overall performance and reduce latency.
+
+
+Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ .. figure:: images/60-camera-resolution.png
+ :width: 75%
+ :align: center
+ :alt: Camera Resolution
+
+ Camera Resolution
+
+ .. tab-item:: Java
+ :sync: java
+
+ In the Sample OpMode, follow the instructions at about lines 121-123 to
+ set the desired camera resolution at about line 131. This setting is
+ made in the VisionPortal Builder, not the Processor Builder.
+
+ .. code-block:: java
+
+ .setCameraResolution(new Size(320, 240))
+
+Post-filter the Blob Results
+----------------------------
+
+After OpenCV has formed Blobs and provided results with the ``getBlobs()``
+command (in Blocks and Java), your OpMode can **post-filter** or reduce the
+list.
+
+Here the term "post-" means after Blob formation and **after the DS and RC
+previews**. So, you will still see contours and boxFits for **all Blobs**.
+
+By default, the Sample OpMode uses a **Contour Area** filter of 50 pixels
+(minimum) to 20,000 pixels (maximum). The lower limit eliminates very small
+Blobs, while the upper limit is approximately the size of the default Region of
+Interest (ROI).
+
+Caution: changing the ROI size and/or changing the camera resolution may
+require an adjustment to filtering by Area.
+
+.. tip::
+ Remember that a Blob contour never extends beyond the ROI, although a boxFit
+ may do so.
+
+Why filter? A smaller list means faster processing, with fewer boxFits for
+your OpMode to evaluate.
+
+You can experiment with increasing the lower limit, and observing the effect on
+Telemetry. Also experiment with the other filters for **Density** and **Aspect
+Ratio**.
+
+The Blocks and Java Sample OpModes give this description:
+
+..
+
+ The list of Blobs can be filtered to remove unwanted Blobs. Note: All
+ contours will be still displayed on the Stream Preview, but only those that
+ satisfy the filter conditions will remain in the current list of "blobs".
+ Multiple filters may be used. Use any of the following filters.
+
+ **Util.filterByArea()** A Blob's area is the number of pixels contained
+ within the contour. Filter out any that are too big or small. Start with a
+ large range and then refine the range based on the likely size of the
+ desired object in the viewfinder.
+
+ **Util.filterByDensity()** A blob's density is an indication of how "full"
+ the contour is. If you put a rubber band around the contour you would get
+ the "Convex Hull" of the contour. The density is the ratio of Contour-area
+ to Convex Hull-area.
+
+ **Util.filterByAspectRatio()** A blob's aspect ratio is the ratio of
+ **boxFit** long side to short side. A perfect square has an aspect ratio of
+ 1. All others are > 1
+
+
+Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ .. figure:: images/70-post-filter.png
+ :width: 75%
+ :align: center
+ :alt: post filter
+
+ Post Filter
+
+ .. tab-item:: Java
+ :sync: java
+
+ In the Sample OpMode, follow the instructions at about lines 147-164 to
+ set the desired post-filtering at about line 166.
+
+ .. code-block:: java
+
+ ColorBlobLocatorProcessor.Util.filterByArea(minArea, maxArea, blobs);
+ ColorBlobLocatorProcessor.Util.filterByDensity(minDensity, maxDensity, blobs);
+ ColorBlobLocatorProcessor.Util.filterByAspectRatio(minAspect, maxAspect, blobs);
+
+Post-filtering commands should be placed **after** calling ``getBlobs()`` and
+**before** your OpMode's handling (or Telemetry) of the ``getBlobs()`` results.
+Remember this as you incorporate these tools into your team's larger OpModes.
+
+Post-sort the Blob Results
+--------------------------
+
+After OpenCV has formed Blobs and provided results with the ``getBlobs()``
+command (in Blocks and Java), your OpMode can **post-sort** the list.
+
+By default, the Sample OpMode sorts by **Contour Area** in descending order
+(largest is first). This is an internally programmed sort, not appearing in
+the Sample OpMode. This default is overridden or replaced by any sort
+specified in the OpMode.
+
+Why sort? A sorted list means your OpMode can process Blobs in a known order,
+perhaps allowing your code to quickly reach a "conclusion". Namely some logic
+condition (probably about boxFits) could be satisfied sooner, to exit the
+vision processing loop and move on to robot action.
+
+The Blocks and Java Sample OpModes give this description:
+
+..
+
+ *The list of Blobs can be sorted using the same Blob attributes as listed
+ above. No more than one sort call should be made. Sorting can use
+ ascending or descending order.*
+
+Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ .. figure:: images/80-post-sort.png
+ :width: 75%
+ :align: center
+ :alt: Post Sort
+
+ Post Sort
+
+ .. tab-item:: Java
+ :sync: java
+
+ In the Sample OpMode, follow the instructions at about lines 169-173 to
+ set the desired post-sorting immediately after those instructions.
+
+ .. code-block:: java
+
+ ColorBlobLocatorProcessor.Util.sortByArea(SortOrder.DESCENDING, blobs); // Default
+ ColorBlobLocatorProcessor.Util.sortByDensity(SortOrder.DESCENDING, blobs);
+ ColorBlobLocatorProcessor.Util.sortByAspectRatio(SortOrder.DESCENDING, blobs);
+
+A post-sorting command should be placed **after** calling ``getBlobs()`` and
+any post-filtering, and **before** your OpMode's handling (or Telemetry) of the
+``getBlobs()`` results. Remember this as you incorporate these tools into your
+team's larger OpModes.
+
+More Documentation
+------------------
+
+How does OpenCV match colors here? The upper and lower values of the target
+color range are used to **threshold** the image's pixels and find those within
+the range. Technical information on thresholding is available at the `OpenCV
+website for thresholding
+<https://docs.opencv.org/4.x/d7/d4d/tutorial_py_thresholding.html>`_.
+
+Technical information on Blur, Erosion and Dilation can be found `here
+<https://docs.opencv.org/4.x/db/df6/tutorial_erosion_dilatation.html>`_
+and at the `OpenCV website for morphology
+<https://docs.opencv.org/4.x/d9/d61/tutorial_py_morphological_ops.html>`_.
+
+Here's a conceptual note from co-developer `@Windwoes
+<https://github.com/Windwoes>`_\ :
+
+..
+
+ The command ``getBlobs()`` does not initiate or perform the processing (Blob
+ formation). The processing is **happening continuously**; ``getBlobs()``
+ just obtains a reference to the latest results.
+
+Next, this tutorial's :doc:`Challenge <../color-locator-challenge/color-locator-challenge>` page shows how to
+**access more OpenCV features** not covered in the Sample OpMode.
+
+============
+
+*Questions, comments and corrections to westsiderobotics@verizon.net*
+
diff --git a/docs/source/color_processing/color-locator-explore/images/10-setColorRangeSwatch.png b/docs/source/color_processing/color-locator-explore/images/10-setColorRangeSwatch.png
new file mode 100644
index 00000000..a63e531e
Binary files /dev/null and b/docs/source/color_processing/color-locator-explore/images/10-setColorRangeSwatch.png differ
diff --git a/docs/source/color_processing/color-locator-explore/images/20-setColorRangeCustom.png b/docs/source/color_processing/color-locator-explore/images/20-setColorRangeCustom.png
new file mode 100644
index 00000000..9875551c
Binary files /dev/null and b/docs/source/color_processing/color-locator-explore/images/20-setColorRangeCustom.png differ
diff --git a/docs/source/color_processing/color-locator-explore/images/30-setROI.png b/docs/source/color_processing/color-locator-explore/images/30-setROI.png
new file mode 100644
index 00000000..39354531
Binary files /dev/null and b/docs/source/color_processing/color-locator-explore/images/30-setROI.png differ
diff --git a/docs/source/color_processing/color-locator-explore/images/40-contourChoices.png b/docs/source/color_processing/color-locator-explore/images/40-contourChoices.png
new file mode 100644
index 00000000..d701b50f
Binary files /dev/null and b/docs/source/color_processing/color-locator-explore/images/40-contourChoices.png differ
diff --git a/docs/source/color_processing/color-locator-explore/images/50-pre-process.png b/docs/source/color_processing/color-locator-explore/images/50-pre-process.png
new file mode 100644
index 00000000..7122db97
Binary files /dev/null and b/docs/source/color_processing/color-locator-explore/images/50-pre-process.png differ
diff --git a/docs/source/color_processing/color-locator-explore/images/60-camera-resolution.png b/docs/source/color_processing/color-locator-explore/images/60-camera-resolution.png
new file mode 100644
index 00000000..511377cd
Binary files /dev/null and b/docs/source/color_processing/color-locator-explore/images/60-camera-resolution.png differ
diff --git a/docs/source/color_processing/color-locator-explore/images/70-post-filter.png b/docs/source/color_processing/color-locator-explore/images/70-post-filter.png
new file mode 100644
index 00000000..94db9513
Binary files /dev/null and b/docs/source/color_processing/color-locator-explore/images/70-post-filter.png differ
diff --git a/docs/source/color_processing/color-locator-explore/images/80-post-sort.png b/docs/source/color_processing/color-locator-explore/images/80-post-sort.png
new file mode 100644
index 00000000..8700414d
Binary files /dev/null and b/docs/source/color_processing/color-locator-explore/images/80-post-sort.png differ
diff --git a/docs/source/color_processing/color-processing-introduction/color-processing-introduction.rst b/docs/source/color_processing/color-processing-introduction/color-processing-introduction.rst
deleted file mode 100644
index 6bffa5eb..00000000
--- a/docs/source/color_processing/color-processing-introduction/color-processing-introduction.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-FTC Color Processing - Introduction
-===================================
-
-Overview
---------
-
-As of FTC SDK 10.1, the *FIRST* Tech Challenge Software Development Kit now
-includes some Color Processing features from OpenCV, a popular and powerful
-open-source library for vision processing.
-
-Introduced for *INTO THE DEEP*, these new features will help FTC teams identify
-colors and process color blobs, useful in any game requiring color and shape
-recognition.
-
-The full contents of this documentation will be released 9/20/2024!
diff --git a/docs/source/color_processing/color-sensor/color-sensor.rst b/docs/source/color_processing/color-sensor/color-sensor.rst
new file mode 100644
index 00000000..86aee8b4
--- /dev/null
+++ b/docs/source/color_processing/color-sensor/color-sensor.rst
@@ -0,0 +1,403 @@
+Color Processing Color Sensor
+=============================
+
+Overview
+--------
+
+A simple way to use FTC's new OpenCV vision tools is to operate a "Color
+Sensor". Namely, it can determine **the color seen by the robot's camera**\ ,
+in a specified zone.
+
+Below, the small central rectangle is the region being evaluated:
+
+.. figure:: images/10-sensor-intro.png
+ :width: 75%
+ :align: center
+ :alt: INTO THE DEEP game pieces
+
+ Color sensor detection zone
+
+A key benefit is that the camera can be much further away from the object than,
+for example, a REV Color Sensor or others like it.
+
+It's still important to accurately point the camera and carefully select the
+image zone to inspect.
+
+For the above example, OpenCV can provide results like this:
+
+.. figure:: images/20-telemetry-intro.png
+ :width: 75%
+ :align: center
+ :alt: Driver Station App showing RED detection
+
+ RED Detection using Color Sensor
+
+The following sections describe how to do this, with a Sample OpMode.
+
+Configuration
+-------------
+
+*Skip this section if ...*
+
+
+* *the active robot configuration already contains "Webcam 1"*, or
+* *using the built-in camera of an Android phone as Robot Controller.*
+
+Before starting the programming, REV Control Hub users should make a robot
+configuration that includes the USB webcam to be used as a color sensor.
+
+For now, use the default webcam name, "Webcam 1". If a different name is
+preferred, edit the Sample OpMode to agree with the exact webcam name in the
+robot configuration.
+
+Save and activate that configuration; its name should appear on the paired
+Driver Station screen.
+
+Sample OpMode
+-------------
+
+Opening the Sample OpMode
++++++++++++++++++++++++++
+
+To learn about opening the Sample OpMode, click the tab for Blocks or Java:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ 1. On a laptop or desktop computer connected via Wi-Fi to the Robot
+ Controller, open the Chrome browser. Go to the REV Control
+ Hub's address ``http://192.168.43.1:8080`` (or
+ ``http://192.168.49.1:8080`` for Android RC phone) and click the
+ *Blocks* tab.
+
+ 2. Click ``Create New OpMode``\ , enter a new name such as
+ "ColorSensor_Maria_v01", and select the Sample OpMode
+ ``ConceptVisionColorSensor``.
+
+ 3. At the top of the Blocks screen, you can change the type from "TeleOp" to
+ "Autonomous", since this Sample OpMode does not use gamepads.
+
+ 4. If using the built-in camera of an RC phone, drag out the relevant Block
+ from the left-side ``VisionPortal.Builder`` toolbox.
+
+ 5. Save the OpMode, time to try it!
+
+
+ .. tab-item:: Java
+ :sync: java
+
+ 1. Open your choice of OnBot Java or Android Studio.
+
+ 2. In the ``teamcode`` folder, add/create a new OpMode with a name such as
+ "ColorSensor_Bobby_v01.java", and select the Sample OpMode
+ ``ConceptVisionColorSensor.java``.
+
+ 3. At about Line 58, you can change ``@TeleOp`` to ``@Autonomous``\ , since
+ this Sample OpMode does not use gamepads.
+
+ 4. If using the built-in camera of an RC phone, follow the OpMode comments
+ to specify that camera.
+
+ 5. Click "Build", time to try it!
+
+Running the Sample OpMode
++++++++++++++++++++++++++
+
+On the Driver Station:
+
+1. Select the Autonomous OpMode that you just saved or built.
+2. Turn off the automatic 30-second match timer (green slider).
+3. Touch INIT only.
+
+The OpMode should give Telemetry, stating the main "matched" color inside the Region of Interest.
+
+.. figure:: images/30-DStelemetry.png
+ :width: 75%
+ :align: center
+ :alt: Driver Station Telemetry
+
+ Driver Station Telemetry
+
+Move the camera around, and watch the Telemetry area on the Driver Station
+screen. It should state "BLUE" when pointing at a blue object, and likewise
+should identify other common colors.
+
+**It's working!** You have a color sensor in your robot camera. Think about
+how to use this in the FTC Robot Game.
+
+*Skip the next two sections if you already know how to use FTC previews.*
+
+DS Preview
+----------
+
+Before describing how to modify the OpMode, this page offers two sections
+showing how to view the OpenCV results with **previews**. Previewing is
+essential for working with vision code.
+
+**Opening the DS Preview**
+
+1. On the Driver Station (DS), remain in INIT -- don't touch the Start button.
+2. At the top right corner, touch the 3-dots menu, then ``Camera Stream``.
+ This shows the camera's view; tap the image to refresh it.
+
+.. figure:: images/34-CameraStream.png
+ :width: 75%
+ :align: center
+ :alt: Camera Stream Preview
+
+ Camera Stream Preview
+
+Drawn on the image is the rectangle being evaluated, called the **Region of
+Interest** (ROI). The ROI border color is the rectangle's predominant color,
+reported to DS Telemetry.
+
+If that border "disappears" against a solid-color background, the thin white
+cross-hairs and 4 small white dots can still identify the ROI.
+
+For a BIG preview, touch the arrows at the bottom right corner.
+
+Or, select Camera Stream again, to return to the previous screen and its
+Telemetry.
+
+RC Preview
+----------
+
+The Robot Controller (RC) device also makes a preview, called ``LiveView``.
+This is full video, and is shown automatically on the screen of an RC phone.
+
+.. figure:: images/38-LiveView.png
+ :width: 75%
+ :align: center
+ :alt: Control Hub LiveView
+
+ Control Hub LiveView
+
+The above preview is from a REV Control Hub.
+
+It has no physical screen, so you must plug in an HDMI monitor **or** use
+open-source `scrcpy <https://github.com/Genymobile/scrcpy>`_ (called "screen
+copy") to see the preview on a laptop or computer that's connected via Wi-Fi to
+the Control Hub.
+
+Modify the Sample
+-----------------
+
+This Sample OpMode is designed for the user to select/edit **two inputs**\ :
+
+
+* define the Region of Interest (ROI)
+* list the colors that might be found
+
+For the **first input**, there are 3 ways to define the ROI:
+
+* entire frame
+* sub-region, defined with standard image coordinates
+* sub-region, defined with a normalized +/- 1.0 coordinate system
+
+For the **second input**, you must list the candidate colors from which a
+result will be selected as a "Match".
+
+Simply choose from the 10 "Swatches": RED, ORANGE, YELLOW, GREEN, CYAN, BLUE,
+PURPLE, MAGENTA, BLACK, WHITE. For efficiency, add only those Swatches for
+which you reasonably expect to get a match.
+
+**The Blocks and Java OpModes contain detailed comments to guide you through
+these edits.** They are not repeated in this tutorial.
+
+Building the VisionPortal
+-------------------------
+
+The Sample OpMode first creates a "Predominant Color" **Processor** using the
+**Builder** pattern. This is the same Builder pattern used to create an
+AprilTag Processor, and previously a TensorFlow Processor.
+
+The Sample OpMode then creates a **VisionPortal**, again using a Builder
+pattern. This includes adding the "Predominant Color" Processor to the
+VisionPortal.
+
+How does OpenCV determine the "predominant color" of the ROI? An algorithm
+called `"k-means" <https://en.wikipedia.org/wiki/K-means_clustering>`_
+determines clusters of similar colors. The color of the cluster with the most
+pixels is called "predominant" here. *(This will NOT be on the final.)*
+
+Testing the Result
+------------------
+
+After trying and learning how the commands work, you can incorporate this Color
+Sensor into your Autonomous and/or TeleOp OpModes.
+
+As seen in the OpMode's Telemetry section, the result is called
+``closestSwatch`` and appears as a word (RED, BLUE, etc.). But this is not
+plain text!
+
+**Testing**, or comparing, for a particular color-match must be done as
+follows. Select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ At the left side, pull out the following multi-Block from ``Vision/PredominantColor/Processor``:
+
+ .. figure:: images/50-closestSwatchCompare.png
+ :align: center
+ :width: 75%
+ :alt: Closest Swatch Comparison
+
+ Closest Swatch Comparison
+
+ You must use this special Block to determine if the result is (for example) RED.
+
+ Why? The result, called ``closestSwatch`` is not **text** (yes it seems
+ like text!). It's a type called ``Swatch`` and can be compared only to
+ another ``Swatch``.
+
+ .. tab-item:: Java
+ :sync: java
+
+ In the sample OpMode, here's the Telemetry that gives the result:
+
+ .. code-block:: java
+
+ telemetry.addData("Best Match:", result.closestSwatch);
+
+ This displays as text, but this is **not** Java type ``String``!
+
+ Here's how to determine if the result is (for example) RED:
+
+ .. code-block:: java
+
+ if (result.closestSwatch == Swatch.RED) { }
+
+ Why? The result, called ``closestSwatch`` is of type ``Swatch`` and can
+ be compared only to another ``Swatch``.
+
+
+OpMode Programming
+------------------
+
+The Color Sensor part of your team's Autonomous OpMode might include these
+goals:
+
+#. Seek a color, using the code from this Sample OpMode
+#. Take a robot action, based on finding that color
+
+If so, select and read the Blocks **or** Java section below:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ Beginners often try this first:
+
+ .. figure:: images/55-IFclosestSwatchWrongWay.png
+ :alt: Wrong way to act upon match result
+ :width: 75%
+ :align: center
+
+ Wrong way to act upon match result
+
+ The problem is, after the robot does the action for RED, the OpMode is
+ still inside the vision loop. Very messy and unpredictable.
+
+ A better approach is to save the result (as text!), exit the loop, then
+ retrieve the stored result to take the desired RED action.
+
+ .. figure:: images/58-IFclosestSwatchRightWay.png
+ :alt: Right way to act upon match result
+ :width: 75%
+ :align: center
+
+ Right way to act upon match result
+
+ How to exit the vision loop? It could be based on `time
+ `_
+ , or finding a particular color, or finding a particular color 10 times
+ in a row, or finding only a particular color for 1 full second, or any
+ other desired criteria.
+
+ .. tab-item:: Java
+ :sync: java
+
+ The color result is generated inside a vision loop. Save the result (as
+ text!), exit the loop, then retrieve the stored result to take the
+ desired RED action.
+
+ .. code-block:: java
+
+ String savedColorMatch = "NULL";
+ .
+ .
+ if (result.closestSwatch == Swatch.RED) {
+ savedColorMatch = "RED";
+ // your code here: optional to exit the vision loop based on your criteria
+ }
+ .
+ .
+ // After exiting the vision loop...
+ if (savedColorMatch == "RED") {
+ // your code here: robot actions if the ROI was RED
+ }
+
+ How to exit the vision loop? It could be based on time, or finding a
+ particular color, or finding a particular color 10 times in a row, or
+ finding only a particular color for 1 full second, or any other desired
+ criteria.
+
+Advanced Use
+------------
+
+Some teams may prefer to read and evaluate the **actual RGB color values**,
+rather than rely on a generic Swatch result.
+
+RGB is a **Color Space** that uses three numerical components of Red, Green and
+Blue. Values range from 0 to 255. For more info, see this tutorial's :doc:`Color
+Spaces <../color-spaces/color-spaces>` page.
+
+Extracting the RGB components can be seen in the Telemetry portion of the
+Sample OpMode. Click the Blocks or Java tab:
+
+.. tab-set::
+ .. tab-item:: Blocks
+ :sync: blocks
+
+ Here are the RGB components of the ROI's predominant color:
+
+ .. figure:: images/70-Blocks-RGB.png
+ :alt: Finding color by RGB
+ :width: 75%
+ :align: center
+
+ Note: the ``Color`` Block has a drop-down list that includes Hue,
+ Saturation and Value. Those settings will **not work** here, to produce
+ components in the HSV Color Space, because the source Block provides only
+ RGB color (its method name is ``.rgb``\ ).
+
+ .. tab-item:: Java
+ :sync: java
+
+ Here are the RGB components of the ROI's predominant color:
+
+ * ``Color.red(result.rgb)``
+ * ``Color.green(result.rgb)``
+ * ``Color.blue(result.rgb)``
+
+For Blocks or Java, those component values can be assigned to numeric
+variables, with names like ``ROIRedValue``, ``ROIGreenValue``, and
+``ROIBlueValue``.
+
+Now your code can process those RGB variables as desired.
+
+Next Sections
+-------------
+
+Soon you can move ahead to try the **Color Locator** processor.
+
+But first, learn a few basic concepts at this tutorial's :doc:`Color Blob Concepts
+<../color-blob-concepts/color-blob-concepts>` page.
+
+============
+
+*Questions, comments and corrections to westsiderobotics@verizon.net*
+
diff --git a/docs/source/color_processing/color-sensor/images/10-sensor-intro.png b/docs/source/color_processing/color-sensor/images/10-sensor-intro.png
new file mode 100644
index 00000000..9ea4b361
Binary files /dev/null and b/docs/source/color_processing/color-sensor/images/10-sensor-intro.png differ
diff --git a/docs/source/color_processing/color-sensor/images/20-telemetry-intro.png b/docs/source/color_processing/color-sensor/images/20-telemetry-intro.png
new file mode 100644
index 00000000..78f93e88
Binary files /dev/null and b/docs/source/color_processing/color-sensor/images/20-telemetry-intro.png differ
diff --git a/docs/source/color_processing/color-sensor/images/30-DStelemetry.png b/docs/source/color_processing/color-sensor/images/30-DStelemetry.png
new file mode 100644
index 00000000..24116123
Binary files /dev/null and b/docs/source/color_processing/color-sensor/images/30-DStelemetry.png differ
diff --git a/docs/source/color_processing/color-sensor/images/34-CameraStream.png b/docs/source/color_processing/color-sensor/images/34-CameraStream.png
new file mode 100644
index 00000000..a655b8c9
Binary files /dev/null and b/docs/source/color_processing/color-sensor/images/34-CameraStream.png differ
diff --git a/docs/source/color_processing/color-sensor/images/38-LiveView.png b/docs/source/color_processing/color-sensor/images/38-LiveView.png
new file mode 100644
index 00000000..c092b4e6
Binary files /dev/null and b/docs/source/color_processing/color-sensor/images/38-LiveView.png differ
diff --git a/docs/source/color_processing/color-sensor/images/50-closestSwatchCompare.png b/docs/source/color_processing/color-sensor/images/50-closestSwatchCompare.png
new file mode 100644
index 00000000..afbbb276
Binary files /dev/null and b/docs/source/color_processing/color-sensor/images/50-closestSwatchCompare.png differ
diff --git a/docs/source/color_processing/color-sensor/images/55-IFclosestSwatchWrongWay.png b/docs/source/color_processing/color-sensor/images/55-IFclosestSwatchWrongWay.png
new file mode 100644
index 00000000..647095f2
Binary files /dev/null and b/docs/source/color_processing/color-sensor/images/55-IFclosestSwatchWrongWay.png differ
diff --git a/docs/source/color_processing/color-sensor/images/58-IFclosestSwatchRightWay.png b/docs/source/color_processing/color-sensor/images/58-IFclosestSwatchRightWay.png
new file mode 100644
index 00000000..fc9bb15f
Binary files /dev/null and b/docs/source/color_processing/color-sensor/images/58-IFclosestSwatchRightWay.png differ
diff --git a/docs/source/color_processing/color-sensor/images/70-Blocks-RGB.png b/docs/source/color_processing/color-sensor/images/70-Blocks-RGB.png
new file mode 100644
index 00000000..5112bb81
Binary files /dev/null and b/docs/source/color_processing/color-sensor/images/70-Blocks-RGB.png differ
diff --git a/docs/source/color_processing/color-spaces/color-spaces.rst b/docs/source/color_processing/color-spaces/color-spaces.rst
new file mode 100644
index 00000000..81eb07f5
--- /dev/null
+++ b/docs/source/color_processing/color-spaces/color-spaces.rst
@@ -0,0 +1,137 @@
+Color Spaces
+============
+
+Overview
+--------
+
+This page of the FTC Color Processing tutorial introduces **Color Spaces**.
+
+OpenCV can process color information using any one of several Color Spaces,
+which are methods to describe an exact shade and brightness.
+
+This page describes 3 choices available in the FTC SDK:
+
+* RGB (Red, Green, Blue)
+* HSV (Hue, Saturation, Value)
+* YCrCb (Luminance, Chrominance red, Chrominance blue)
+
+Each Color Space uses 3 numbers, 0 to 255, to describe a particular color.
+
+RGB Color Space
+---------------
+
+**RGB** is a common Color Space, easy to understand. Its 3 components are:
+
+* **Red** (0 - 255)
+* **Green** (0 - 255)
+* **Blue** (0 - 255)
+
+.. figure:: images/10-RGB-wheel.png
+ :width: 75%
+ :align: center
+ :alt: RGB Color Wheel
+
+ RGB Color Wheel
+
+Pure Red has values 255 red, 0 green, 0 blue. Pure Green has 0 red, 255 green,
+0 blue.
+
+Magenta is a blend of Red and Blue, so its values are 255 red, 0 green, and 255
+blue.
+
+Here's a useful way to visualize the RGB Color Space, with one axis for each
+component:
+
+.. figure:: images/20-RGB-cube.png
+ :width: 75%
+ :align: center
+ :alt: RGB Cube Visualization
+
+ RGB Cube Visualization
+
+Each near-side external face of this box has the maximum value for one
+component. Every shade of color on the top face, for example, has a Blue
+component of 255.
+
+The nearest corner is **White**, with RGB values of (255, 255, 255). Namely,
+full values of Red, Green and Blue light will combine to appear as white light.
+
+Where is **Black**? It's the opposite corner, at the origin, hidden in this
+view. Its values are (0, 0, 0) -- no color at all.
+
+This RGB system is used only for light-based colors, including video. It does
+not apply for painted colors, or printed colors, which use other color systems.
+
+.. tip::
+ Mixing red paint, green paint and blue paint will **not** create white paint!
+
+Technical information is available at `Wikipedia Color Spaces <https://en.wikipedia.org/wiki/Color_space>`_.
+
+HSV Color Space
+---------------
+
+Another Color Space used by OpenCV is **HSV**\ : Hue, Saturation and Value.
+
+.. figure:: images/30-HSV-cone.png
+ :width: 75%
+ :align: center
+ :alt: HSV Cone Visualization
+
+ HSV Cone Visualization
+
+**Hue** is the actual shade of color; see the familiar color wheel on top.
+
+**Saturation** measures the amount of white: a lower value is whiter, or more
+grey. On the HSV cone, see the outward arrow for Saturation. The highest
+value of 255 is the fully saturated color (no white).
+
+**Value** measures brightness; see the upward arrow on the HSV cone. The top
+face of the cone (Value = 255), is the fully bright color. Black is found at
+the lower tip, Value = 0.
+
+Technical information is available at `Wikipedia HSL/HSV <https://en.wikipedia.org/wiki/HSL_and_HSV>`_.
+
+YCrCb Color Space
+-----------------
+
+A third Color Space used by OpenCV is **YCrCb**.
+
+.. figure:: images/40-YCrCb.png
+ :width: 75%
+ :align: center
+ :alt: YCrCb Visualization
+
+ YCrCb Visualization
+
+The Y is **Luminance** or brightness, while Cr and Cb are red and blue
+components of **Chrominance**. Technical information is available at
+`Wikipedia YCbCr <https://en.wikipedia.org/wiki/YCbCr>`_.
+
+The YCrCb Color Space offers efficient computation of color processing, and is
+widely used in video applications.
+
+Some online documentation refers to a Color Space called **YCbCr**. This is
+the same system, with the last 2 values reversed.
+
+How to choose?
+--------------
+
+Use the Color Space that's convenient for you. RGB is easy to understand,
+while YCrCb may offer better computational performance (if needed).
+
+It's easy to find free public websites to convert the 3 values from one Color
+Space into the corresponding 3 values from another Color Space.
+
+When converting to HSV, some online sites give Hue in degrees (0 to 360), and
+Saturation and Value as percentages (0 to 100). Apply these (as a proportion
+of the maximum) to 255, for values to use in the FTC **Color Locator**
+processor.
+
+The **Color Locator** processor can use any of these three Color Spaces. The
+simple **Color Sensor** processor uses YCrCb internally, but reports results in
+RGB only.
+
+============
+
+*Questions, comments and corrections to westsiderobotics@verizon.net*
+
diff --git a/docs/source/color_processing/color-spaces/images/10-RGB-wheel.png b/docs/source/color_processing/color-spaces/images/10-RGB-wheel.png
new file mode 100644
index 00000000..acaf39cb
Binary files /dev/null and b/docs/source/color_processing/color-spaces/images/10-RGB-wheel.png differ
diff --git a/docs/source/color_processing/color-spaces/images/20-RGB-cube.png b/docs/source/color_processing/color-spaces/images/20-RGB-cube.png
new file mode 100644
index 00000000..fdde3728
Binary files /dev/null and b/docs/source/color_processing/color-spaces/images/20-RGB-cube.png differ
diff --git a/docs/source/color_processing/color-spaces/images/30-HSV-cone.png b/docs/source/color_processing/color-spaces/images/30-HSV-cone.png
new file mode 100644
index 00000000..243757c1
Binary files /dev/null and b/docs/source/color_processing/color-spaces/images/30-HSV-cone.png differ
diff --git a/docs/source/color_processing/color-spaces/images/40-YCrCb.png b/docs/source/color_processing/color-spaces/images/40-YCrCb.png
new file mode 100644
index 00000000..ebd500a4
Binary files /dev/null and b/docs/source/color_processing/color-spaces/images/40-YCrCb.png differ
diff --git a/docs/source/color_processing/index.rst b/docs/source/color_processing/index.rst
new file mode 100644
index 00000000..d8da0cee
--- /dev/null
+++ b/docs/source/color_processing/index.rst
@@ -0,0 +1,84 @@
+Color Processing Introduction
+===============================
+
+Overview
+--------
+
+The *FIRST* Tech Challenge SDK v10.1 software now includes some **Color
+Processing** features from OpenCV, a popular and powerful open-source library
+for vision processing.
+
+Introduced with INTO THE DEEP, these new features will help *FIRST* Tech Challenge teams
+**identify colors** and **process color blobs**\ , useful in any game requiring
+color and shape recognition.
+
+Here's the outline of this tutorial's main pages:
+
+.. toctree::
+ :maxdepth: 1
+ :titlesonly:
+
+ color-sensor/color-sensor
+ color-blob-concepts/color-blob-concepts
+ color-locator-discover/color-locator-discover
+ color-locator-explore/color-locator-explore
+ color-locator-challenge/color-locator-challenge
+ color-spaces/color-spaces
+
+Much credit to developer and Sample OpMode author `@gearsincorg <https://github.com/gearsincorg>`_, EasyOpenCV developer `@Windwoes <https://github.com/Windwoes>`_, FTC Blocks developer `@lizlooney <https://github.com/lizlooney>`_, and the open-source team at `OpenCV <https://opencv.org>`_.
+
+Compatibility
+-------------
+
+This new software includes two Color Processors, each compatible with the FTC
+VisionPortal introduced in 2023. These processors can run alongside an
+AprilTag processor, and replace the TensorFlow processor (removed in 2024).
+
+These new processors can be used on the usual FTC cameras:
+
+* any UVC-compatible webcam
+* the built-in camera of an FTC-supported Android phone (as Robot Controller)
+
+This does **not** include vision sensors such as HuskyLens and LimeLight 3A,
+which do not use the FTC VisionPortal.
+
+Two Processors
+--------------
+
+The new software includes these processors:
+
+* **Color Sensor** - detects the color of a specified zone in the camera's
+ view
+* **Color Locator** - gives detailed info on clusters of a specified color,
+ in a specified zone
+
+This tutorial has a :doc:`Color Sensor <color-sensor/color-sensor>` page, showing how
+to use the Sample OpMode called ``ConceptVisionColorSensor``.
+
+For the **Color Locator** processor, the color "clusters" are called **Blobs**.
+As listed above, this tutorial offers one page on Color Blob Concepts, and
+three pages covering the Sample OpMode called ``ConceptVisionColorLocator``.
+
+The Sample OpModes are available in **FTC Blocks**\ , and in **Java** for use
+in OnBot Java or Android Studio. Each programming section of this tutorial has
+a Blocks tab and a Java tab.
+
+Next Steps
+----------
+
+Time to get started!
+
+Following this tutorial in order, first try the Sample OpMode for :doc:`Color
+Sensor <color-sensor/color-sensor>`.
+
+Then read about **Color Blob Concepts**\ , and try the **Color Locator** Sample
+OpMode.
+
+Soon you'll be ready to add one or both features to your Autonomous OpModes --
+perhaps even to help automate your TeleOp!
+
+============
+
+Questions, comments and corrections to westsiderobotics@verizon.net
+
+
diff --git a/docs/source/conf.py b/docs/source/conf.py
index f26d3cdf..3842eb44 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -30,6 +30,7 @@
'sphinxcontrib.cookiebanner',
'sphinxcontrib.mermaid',
"sphinxext.rediraffe",
+ "ftcdocs_linkcheckdiff",
]
autosectionlabel_prefix_document = True
diff --git a/docs/source/programming_resources/index.rst b/docs/source/programming_resources/index.rst
index 8faea95a..4c4ecfc9 100644
--- a/docs/source/programming_resources/index.rst
+++ b/docs/source/programming_resources/index.rst
@@ -110,7 +110,23 @@ Learning more about using vision
vision/vision_overview/vision-overview
vision/webcam_controls/index
Camera Calibration
- FTC Color Processing <../color_processing/color-processing-introduction/color-processing-introduction>
+
+Camera Color Processing
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Learn more about using a simple webcam or smartphone camera to perform Color Processing
+
+.. toctree::
+ :maxdepth: 1
+ :titlesonly:
+
+ ../color_processing/index
+ ../color_processing/color-sensor/color-sensor
+ ../color_processing/color-blob-concepts/color-blob-concepts
+ ../color_processing/color-locator-discover/color-locator-discover
+ ../color_processing/color-locator-explore/color-locator-explore
+ ../color_processing/color-locator-challenge/color-locator-challenge
+ ../color_processing/color-spaces/color-spaces
Advanced Topics
~~~~~~~~~~~~~~~~