[docker] Add docker with demo file #8

Open · wants to merge 5 commits into master
10 changes: 10 additions & 0 deletions README.md
@@ -18,6 +18,16 @@ pip install -e .
* To test on live video: `python face_alignment_test.py [-i webcam_index]`
* To test on a video file: `python face_alignment_test.py [-i input_file] [-o output_file]`

### Docker
You can run a containerized demo using Docker. The container installs the required dependencies and lets you test the `FANPredictor` on a sample image.

Either run `bash demo/run.sh` or `docker build -t ibug-face_alignment -f ./demo/Dockerfile . && docker run -it --rm -p 8888:8888 ibug-face_alignment`.
The container automatically starts a tmux session and launches jupyter-lab; you can then open `notebook.ipynb`. If you don't want this, either kill the tmux session immediately or pass `--entrypoint /bin/bash` to the `docker run` command (see the example at the end of this section).

Please install Docker by following [the official instructions](https://docs.docker.com/get-docker/).

**NOTE:** On Windows, running the container might fail; in that case, replace `docker run` with `winpty docker run`. I also recommend running the commands from a Unix-like shell such as Git Bash or the Docker Quickstart Terminal. The shell script `demo/run.sh` takes care of this for you.
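
For example, to work inside the container interactively instead of using the default session, here is a minimal sketch (assuming the image was built with the `ibug-face_alignment` tag as above):

```bash
# Start a plain shell instead of the default tmux + jupyter-lab session
docker run -it --rm -p 8888:8888 --entrypoint /bin/bash ibug-face_alignment

# Inside the container (working directory /face_alignment) you can run the sample script ...
python main.py

# ... or start jupyter-lab by hand and open notebook.ipynb from the browser
jupyter-lab --ip=0.0.0.0 --no-browser --allow-root
```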

## How to Use
```python
# Import the libraries
30 changes: 30 additions & 0 deletions demo/Dockerfile
@@ -0,0 +1,30 @@
FROM continuumio/miniconda3:4.12.0

# OpenCV needs libGL at runtime; tmux, vim and lsof are convenience tools for the demo session
RUN apt update && apt upgrade -y && \
    apt install -y \
    libgl1 \
    lsof \
    tmux \
    vim

WORKDIR /face_alignment

# The build context is the repository root (see README / demo/run.sh), so paths are relative to it
COPY requirements.txt .
RUN pip install -r requirements.txt && \
    pip install scikit-image

# Install the ibug.face_alignment package itself
COPY setup.py .
COPY ibug ibug
RUN pip install -e .

COPY demo/main.py .

# Jupyter and plotting dependencies used by the demo notebook
RUN conda install jupyterlab matplotlib ipywidgets

COPY demo/notebook.ipynb .

# Start a tmux session with a shell, plus a second window running jupyter-lab on port 8888
CMD [ \
    "tmux", \
    "new-session", "/bin/bash", ";", \
    "new-window", "jupyter-lab --ip=0.0.0.0 --no-browser --allow-root" \
]
25 changes: 25 additions & 0 deletions demo/main.py
@@ -0,0 +1,25 @@
import os

import cv2
import numpy as np
import torch
from skimage.data import astronaut

from ibug.face_alignment import FANPredictor

# Use skimage's sample "astronaut" image, which contains a single frontal face
image = astronaut()

# Detect the face with OpenCV's Haar cascade and convert the (x, y, w, h) box
# into the (x1, y1, x2, y2) format expected by FANPredictor
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
face_cascade = cv2.CascadeClassifier(os.path.join(cv2.data.haarcascades, "haarcascade_frontalface_default.xml"))
detections = face_cascade.detectMultiScale(gray)
assert len(detections) == 1, "Please submit an image with exactly one clear frontal face"
x, y, w, h = detections[0]
detection = np.array([x, y, x + w, y + h])

# Run the FAN landmark predictor on the detected face, using the GPU if available
config = FANPredictor.create_config(gamma=1.0, radius=0.1, use_jit=False)

device = "cpu"
if torch.cuda.is_available():
    device = torch.cuda.current_device()
fan = FANPredictor(device=device, model=FANPredictor.get_model('2dfan2_alt'), config=config)

landmarks, scores = fan(image, detection)
145 changes: 145 additions & 0 deletions demo/notebook.ipynb
@@ -0,0 +1,145 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "b3e3e628-e16a-474e-ad32-3edb91a82ba1",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import time\n",
"import io\n",
"from typing import Callable\n",
"from contextlib import contextmanager\n",
"\n",
"import cv2\n",
"import torch\n",
"from skimage.data import astronaut\n",
"import numpy as np\n",
"from matplotlib import pyplot as plt\n",
"from PIL import Image\n",
"import ipywidgets as widgets\n",
"\n",
"from ibug.face_alignment import FANPredictor\n",
"from ibug.face_alignment.utils import plot_landmarks"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "eea2256f-2d81-406f-8d30-6753b026e8a4",
"metadata": {},
"outputs": [],
"source": [
"def detect(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n",
" face_cascade = cv2.CascadeClassifier(os.path.join(cv2.data.haarcascades, \"haarcascade_frontalface_default.xml\"))\n",
" detections = face_cascade.detectMultiScale(gray)\n",
" assert len(detections) == 1, \"Please submit an image with exactly one clear frontal face\"\n",
" x, y, w, h = detections[0]\n",
" return np.array([x, y, x+w, y+h])\n",
"\n",
"def resize(image: np.ndarray, longer_side: int) -> np.ndarray:\n",
" width, height = image.shape[:2]\n",
" largest = max(width, height)\n",
" ratio = longer_side / largest\n",
" return cv2.resize(image, (int(height * ratio), int(width * ratio)))\n",
"\n",
"@contextmanager\n",
"def time_tracking(callback: Callable[[float], None]):\n",
" start = time.perf_counter()\n",
" try:\n",
" yield\n",
" finally:\n",
" end = time.perf_counter()\n",
" callback(end - start)\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f065b832-3d29-473a-aa56-de236f5040e1",
"metadata": {},
"outputs": [],
"source": [
"file_upload = widgets.FileUpload()\n",
"\n",
"display(file_upload)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d18fc123-fcce-40a2-83be-0bf7fd430e99",
"metadata": {},
"outputs": [],
"source": [
"image = astronaut()\n",
"\n",
"for filename, file_info in file_upload.value.items():\n",
" image = np.array(Image.open(io.BytesIO(file_info['content'])))\n",
" \n",
"image = resize(image, 400)\n",
"Image.fromarray(image)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5c15e1d3-2dd5-400d-85f6-46f7a1d88af5",
"metadata": {},
"outputs": [],
"source": [
"with time_tracking(print):\n",
" detection = detect(image)\n",
"print(f\"Detected face at {detection=}\")\n",
"\n",
"config = FANPredictor.create_config(gamma = 1.0, radius = 0.1, use_jit = False)\n",
"\n",
"device = \"cpu\"\n",
"if torch.cuda.is_available():\n",
" device = torch.cuda.current_device()\n",
"\n",
"fan = FANPredictor(device=device, model=FANPredictor.get_model('2dfan2_alt'), config=config)\n",
"with time_tracking(print):\n",
" landmarks, scores = fan(image, detection)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "afef425a-84d1-4ee2-b14b-8ee8bbbd21f6",
"metadata": {},
"outputs": [],
"source": [
"vis = image.copy()\n",
"plot_landmarks(vis, landmarks[0])\n",
"\n",
"Image.fromarray(vis)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
16 changes: 16 additions & 0 deletions demo/run.sh
@@ -0,0 +1,16 @@
if ! [ "$(basename $PWD)" = "face_alignment" ]
then
echo "Please run from the base directory of this repo."
exit 1
fi

docker build -t ibug-face_alignment -f ./demo/Dockerfile .

prefix=""
if [ $(uname | grep -iE "(mingw|cygwin)") ]
then
prefix="winpty"
fi

echo $prefix
$prefix docker run -it --rm -p 8888:8888 ibug-face_alignment