fix No module named psutil issue. #234

Open · wants to merge 3 commits into base: main
3 changes: 2 additions & 1 deletion .gitignore
@@ -6,4 +6,5 @@
weights
build/
*.egg-info/
gradio_cached_examples
gradio_cached_examples
output/
12 changes: 6 additions & 6 deletions README.md
@@ -60,29 +60,29 @@ Then, you can run the scripts to try the everything mode and three prompt modes.

```shell
# Everything mode
python Inference.py --model_path ./weights/FastSAM.pt --img_path ./images/dogs.jpg
python Inference.py --model_path ./weights/FastSAM-x.pt --img_path ./images/dogs.jpg
```

```shell
# Text prompt
python Inference.py --model_path ./weights/FastSAM.pt --img_path ./images/dogs.jpg --text_prompt "the yellow dog"
python Inference.py --model_path ./weights/FastSAM-x.pt --img_path ./images/dogs.jpg --text_prompt "the yellow dog"
```

```shell
# Box prompt (xywh)
python Inference.py --model_path ./weights/FastSAM.pt --img_path ./images/dogs.jpg --box_prompt "[[570,200,230,400]]"
python Inference.py --model_path ./weights/FastSAM-x.pt --img_path ./images/dogs.jpg --box_prompt "[[570,200,230,400]]"
```

```shell
# Points prompt
python Inference.py --model_path ./weights/FastSAM.pt --img_path ./images/dogs.jpg --point_prompt "[[520,360],[620,300]]" --point_label "[1,0]"
python Inference.py --model_path ./weights/FastSAM-x.pt --img_path ./images/dogs.jpg --point_prompt "[[520,360],[620,300]]" --point_label "[1,0]"
```

You can use the following code to generate all masks and visualize the results.
```python
from fastsam import FastSAM, FastSAMPrompt

model = FastSAM('./weights/FastSAM.pt')
model = FastSAM('./weights/FastSAM-x.pt')
IMAGE_PATH = './images/dogs.jpg'
DEVICE = 'cpu'
everything_results = model(IMAGE_PATH, device=DEVICE, retina_masks=True, imgsz=1024, conf=0.4, iou=0.9,)
@@ -129,7 +129,7 @@ Training from scratch or validation: [Training and Validation Code](https://gith
- We also provide a UI for testing our method that is built with gradio. You can upload a custom image, select the mode and set the parameters, click the segment button, and get a satisfactory segmentation result. Currently, the UI supports interaction with the 'Everything mode' and 'points mode'. We plan to add support for additional modes in the future. Running the following command in a terminal will launch the demo:

```
# Download the pre-trained model in "./weights/FastSAM.pt"
# Download the pre-trained model in "./weights/FastSAM-x.pt"
python app_gradio.py
```

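For readers skimming the README hunk above: the collapsed diff cuts the Python snippet off right after the model call. Below is a minimal sketch of how that snippet typically continues with the fastsam prompt API; the FastSAMPrompt calls and the output path are illustrative assumptions, not lines from this PR.

```python
from fastsam import FastSAM, FastSAMPrompt

# Load the renamed checkpoint used throughout this PR.
model = FastSAM('./weights/FastSAM-x.pt')
IMAGE_PATH = './images/dogs.jpg'
DEVICE = 'cpu'

# Run the model once to get candidate masks for the whole image.
everything_results = model(IMAGE_PATH, device=DEVICE, retina_masks=True,
                           imgsz=1024, conf=0.4, iou=0.9)

# Assumed continuation: wrap the results in a prompt helper, take the
# "everything" annotations, and write the visualization under ./output/,
# the directory this PR adds to .gitignore.
prompt_process = FastSAMPrompt(IMAGE_PATH, everything_results, device=DEVICE)
ann = prompt_process.everything_prompt()
prompt_process.plot(annotations=ann, output_path='./output/dogs.jpg')
```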
107 changes: 67 additions & 40 deletions app_gradio.py
@@ -7,7 +7,7 @@
import numpy as np

# Load the pre-trained model
model = YOLO('./weights/FastSAM.pt')
model = YOLO('./weights/FastSAM-x.pt')

device = torch.device(
"cuda"
@@ -28,7 +28,7 @@
🔥 2023/06/26: Support the points mode. (Better and faster interaction will come soon!)

🔥 2023/06/24: Add the 'Advanced options' in Everything mode to get a more detailed adjustment.
"""
"""

description_e = """This is a demo on Github project 🏃 [Fast Segment Anything Model](https://github.com/CASIA-IVA-Lab/FastSAM). Welcome to give a star ⭐️ to it.

@@ -71,7 +71,7 @@

def segment_everything(
input,
input_size=1024,
input_size=1024,
iou_threshold=0.7,
conf_threshold=0.25,
better_quality=False,
@@ -98,11 +98,12 @@ def segment_everything(

if len(text) > 0:
results = format_results(results[0], 0)
annotations, _ = text_prompt(results, text, input, device=device, wider=wider)
annotations, _ = text_prompt(
results, text, input, device=device, wider=wider)
annotations = np.array([annotations])
else:
annotations = results[0].masks.data

fig = fast_process(annotations=annotations,
image=input,
device=device,
@@ -117,7 +118,7 @@

def segment_with_points(
input,
input_size=1024,
input_size=1024,
iou_threshold=0.7,
conf_threshold=0.25,
better_quality=False,
@@ -127,26 +128,28 @@ def segment_with_points(
):
global global_points
global global_point_label

input_size = int(input_size)  # ensure imgsz is an integer
# Thanks for the suggestion by hysts in HuggingFace.
w, h = input.size
scale = input_size / max(w, h)
new_w = int(w * scale)
new_h = int(h * scale)
input = input.resize((new_w, new_h))

scaled_points = [[int(x * scale) for x in point] for point in global_points]

scaled_points = [[int(x * scale) for x in point]
for point in global_points]

results = model(input,
device=device,
retina_masks=True,
iou=iou_threshold,
conf=conf_threshold,
imgsz=input_size,)

results = format_results(results[0], 0)
annotations, _ = point_prompt(results, scaled_points, global_point_label, new_h, new_w)
annotations, _ = point_prompt(
results, scaled_points, global_point_label, new_h, new_w)
annotations = np.array([annotations])

fig = fast_process(annotations=annotations,
Expand All @@ -169,25 +172,31 @@ def get_points_with_draw(image, label, evt: gr.SelectData):
global global_point_label

x, y = evt.index[0], evt.index[1]
point_radius, point_color = 15, (255, 255, 0) if label == 'Add Mask' else (255, 0, 255)
point_radius, point_color = 15, (255, 255, 0) if label == 'Add Mask' else (
255, 0, 255)
global_points.append([x, y])
global_point_label.append(1 if label == 'Add Mask' else 0)

print(x, y, label == 'Add Mask')

# Create an object that can draw on the image
draw = ImageDraw.Draw(image)
draw.ellipse([(x - point_radius, y - point_radius), (x + point_radius, y + point_radius)], fill=point_color)
draw.ellipse([(x - point_radius, y - point_radius),
(x + point_radius, y + point_radius)], fill=point_color)
return image


cond_img_e = gr.Image(label="Input", value=default_example[0], type='pil')
cond_img_p = gr.Image(label="Input with points", value=default_example[0], type='pil')
cond_img_t = gr.Image(label="Input with text", value="examples/dogs.jpg", type='pil')
cond_img_p = gr.Image(label="Input with points",
value=default_example[0], type='pil')
cond_img_t = gr.Image(label="Input with text",
value="examples/dogs.jpg", type='pil')

segm_img_e = gr.Image(label="Segmented Image", interactive=False, type='pil')
segm_img_p = gr.Image(label="Segmented Image with points", interactive=False, type='pil')
segm_img_t = gr.Image(label="Segmented Image with text", interactive=False, type='pil')
segm_img_p = gr.Image(label="Segmented Image with points",
interactive=False, type='pil')
segm_img_t = gr.Image(label="Segmented Image with text",
interactive=False, type='pil')

global_points = []
global_point_label = []
@@ -224,10 +233,12 @@ def get_points_with_draw(image, label, evt: gr.SelectData):
input_size_slider.render()

with gr.Row():
contour_check = gr.Checkbox(value=True, label='withContours', info='draw the edges of the masks')
contour_check = gr.Checkbox(
value=True, label='withContours', info='draw the edges of the masks')

with gr.Column():
segment_btn_e = gr.Button("Segment Everything", variant='primary')
segment_btn_e = gr.Button(
"Segment Everything", variant='primary')
clear_btn_e = gr.Button("Clear", variant="secondary")

gr.Markdown("Try some of the examples below ⬇️")
@@ -240,12 +251,16 @@ def get_points_with_draw(image, label, evt: gr.SelectData):

with gr.Column():
with gr.Accordion("Advanced options", open=False):
iou_threshold = gr.Slider(0.1, 0.9, 0.7, step=0.1, label='iou', info='iou threshold for filtering the annotations')
conf_threshold = gr.Slider(0.1, 0.9, 0.25, step=0.05, label='conf', info='object confidence threshold')
iou_threshold = gr.Slider(
0.1, 0.9, 0.7, step=0.1, label='iou', info='iou threshold for filtering the annotations')
conf_threshold = gr.Slider(
0.1, 0.9, 0.25, step=0.05, label='conf', info='object confidence threshold')
with gr.Row():
mor_check = gr.Checkbox(value=False, label='better_visual_quality', info='better quality using morphologyEx')
mor_check = gr.Checkbox(
value=False, label='better_visual_quality', info='better quality using morphologyEx')
with gr.Column():
retina_check = gr.Checkbox(value=True, label='use_retina', info='draw high-resolution segmentation masks')
retina_check = gr.Checkbox(
value=True, label='use_retina', info='draw high-resolution segmentation masks')

# Description
gr.Markdown(description_e)
@@ -270,16 +285,19 @@ def get_points_with_draw(image, label, evt: gr.SelectData):

with gr.Column(scale=1):
segm_img_p.render()

# Submit & Clear
with gr.Row():
with gr.Column():
with gr.Row():
add_or_remove = gr.Radio(["Add Mask", "Remove Area"], value="Add Mask", label="Point_label (foreground/background)")
add_or_remove = gr.Radio(
["Add Mask", "Remove Area"], value="Add Mask", label="Point_label (foreground/background)")

with gr.Column():
segment_btn_p = gr.Button("Segment with points prompt", variant='primary')
clear_btn_p = gr.Button("Clear points", variant='secondary')
segment_btn_p = gr.Button(
"Segment with points prompt", variant='primary')
clear_btn_p = gr.Button(
"Clear points", variant='secondary')

gr.Markdown("Try some of the examples below ⬇️")
gr.Examples(examples=examples,
@@ -293,7 +311,8 @@ def get_points_with_draw(image, label, evt: gr.SelectData):
# Description
gr.Markdown(description_p)

cond_img_p.select(get_points_with_draw, [cond_img_p, add_or_remove], cond_img_p)
cond_img_p.select(get_points_with_draw, [
cond_img_p, add_or_remove], cond_img_p)

segment_btn_p.click(segment_with_points,
inputs=[cond_img_p],
@@ -319,11 +338,14 @@ def get_points_with_draw(image, label, evt: gr.SelectData):
info='Our model was trained on a size of 1024')
with gr.Row():
with gr.Column():
contour_check = gr.Checkbox(value=True, label='withContours', info='draw the edges of the masks')
text_box = gr.Textbox(label="text prompt", value="a black dog")
contour_check = gr.Checkbox(
value=True, label='withContours', info='draw the edges of the masks')
text_box = gr.Textbox(
label="text prompt", value="a black dog")

with gr.Column():
segment_btn_t = gr.Button("Segment with text", variant='primary')
segment_btn_t = gr.Button(
"Segment with text", variant='primary')
clear_btn_t = gr.Button("Clear", variant="secondary")

gr.Markdown("Try some of the examples below ⬇️")
@@ -336,16 +358,21 @@ def get_points_with_draw(image, label, evt: gr.SelectData):

with gr.Column():
with gr.Accordion("Advanced options", open=False):
iou_threshold = gr.Slider(0.1, 0.9, 0.7, step=0.1, label='iou', info='iou threshold for filtering the annotations')
conf_threshold = gr.Slider(0.1, 0.9, 0.25, step=0.05, label='conf', info='object confidence threshold')
iou_threshold = gr.Slider(
0.1, 0.9, 0.7, step=0.1, label='iou', info='iou threshold for filtering the annotations')
conf_threshold = gr.Slider(
0.1, 0.9, 0.25, step=0.05, label='conf', info='object confidence threshold')
with gr.Row():
mor_check = gr.Checkbox(value=False, label='better_visual_quality', info='better quality using morphologyEx')
retina_check = gr.Checkbox(value=True, label='use_retina', info='draw high-resolution segmentation masks')
wider_check = gr.Checkbox(value=False, label='wider', info='wider result')
mor_check = gr.Checkbox(
value=False, label='better_visual_quality', info='better quality using morphologyEx')
retina_check = gr.Checkbox(
value=True, label='use_retina', info='draw high-resolution segmentation masks')
wider_check = gr.Checkbox(
value=False, label='wider', info='wider result')

# Description
gr.Markdown(description_e)

segment_btn_t.click(segment_everything,
inputs=[
cond_img_t,
Expand All @@ -362,7 +389,7 @@ def get_points_with_draw(image, label, evt: gr.SelectData):

def clear():
return None, None

def clear_text():
return None, None, None

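The reflowed segment_with_points code above keeps clicked points aligned with the resized image by applying a single scale factor to both. A standalone sketch of that coordinate scaling is shown below; the helper name and the example sizes are hypothetical and used only for illustration, not part of app_gradio.py.

```python
from PIL import Image


def scale_image_and_points(image: Image.Image, points: list[list[int]],
                           target_size: int = 1024):
    """Resize so the longer side equals target_size and scale points to match.

    Hypothetical helper mirroring the logic in segment_with_points.
    """
    w, h = image.size
    scale = target_size / max(w, h)  # one factor for both axes and the points
    resized = image.resize((int(w * scale), int(h * scale)))
    scaled = [[int(coord * scale) for coord in point] for point in points]
    return resized, scaled


# A 2048x1536 image is halved, and so are the click coordinates.
img = Image.new('RGB', (2048, 1536))
resized, pts = scale_image_and_points(img, [[520, 360], [620, 300]])
print(resized.size, pts)  # (1024, 768) [[260, 180], [310, 150]]
```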
1 change: 1 addition & 0 deletions requirements.txt
@@ -13,6 +13,7 @@ pandas>=1.1.4
seaborn>=0.11.0

gradio==3.35.2
psutil==6.0.0

# Ultralytics-----------------------------------
# ultralytics == 8.0.120
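The line that actually resolves the reported error is the pinned psutil above, presumably because the bundled ultralytics code imports psutil. A quick sanity check you could run after `pip install -r requirements.txt` is sketched below; it is not part of this PR, and it only exercises psutil's standard API.

```python
# Environment check (not part of this PR): confirm the dependency that caused
# "ModuleNotFoundError: No module named 'psutil'" now resolves.
import importlib.util
import sys

if importlib.util.find_spec("psutil") is None:
    sys.exit("psutil is not installed; run: pip install -r requirements.txt")

import psutil

# Report the resolved version and a simple memory reading.
print(f"psutil {psutil.__version__}")
print(f"system memory in use: {psutil.virtual_memory().percent}%")
```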