├── MANIFEST.in
├── examples
├── classification
│ ├── flags.txt
│ ├── data_annotated
│ │ ├── 0001.jpg
│ │ ├── 0002.jpg
│ │ ├── 0001.json
│ │ └── 0002.json
│ ├── .readme
│ │ ├── annotation_cat.jpg
│ │ └── annotation_dog.jpg
│ └── README.md
├── video_annotation
│ ├── labelme2voc.py
│ ├── data_dataset_voc
│ │ ├── class_names.txt
│ │ ├── JPEGImages
│ │ │ ├── 00000100.jpg
│ │ │ ├── 00000101.jpg
│ │ │ ├── 00000102.jpg
│ │ │ ├── 00000103.jpg
│ │ │ └── 00000104.jpg
│ │ ├── SegmentationClass
│ │ │ ├── 00000100.npy
│ │ │ ├── 00000101.npy
│ │ │ ├── 00000102.npy
│ │ │ ├── 00000103.npy
│ │ │ └── 00000104.npy
│ │ ├── SegmentationClassPNG
│ │ │ ├── 00000100.png
│ │ │ ├── 00000101.png
│ │ │ ├── 00000102.png
│ │ │ ├── 00000103.png
│ │ │ └── 00000104.png
│ │ └── SegmentationClassVisualization
│ │   ├── 00000100.jpg
│ │   ├── 00000101.jpg
│ │   ├── 00000102.jpg
│ │   ├── 00000103.jpg
│ │   └── 00000104.jpg
│ ├── labels.txt
│ ├── .readme
│ │ ├── 00000100.jpg
│ │ ├── 00000101.jpg
│ │ └── data_annotated.gif
│ ├── data_annotated
│ │ ├── 00000100.jpg
│ │ ├── 00000101.jpg
│ │ ├── 00000102.jpg
│ │ ├── 00000103.jpg
│ │ ├── 00000104.jpg
│ │ ├── 00000100.json
│ │ ├── 00000101.json
│ │ ├── 00000102.json
│ │ ├── 00000103.json
│ │ └── 00000104.json
│ └── README.md
├── primitives
│ ├── primitives.jpg
│ └── primitives.json
├── tutorial
│ ├── apc2016_obj3.jpg
│ ├── .readme
│ │ ├── annotation.jpg
│ │ ├── draw_json.jpg
│ │ └── draw_label_png.jpg
│ ├── apc2016_obj3_json
│ │ ├── img.png
│ │ ├── label.png
│ │ ├── label_viz.png
│ │ ├── label_names.txt
│ │ └── info.yaml
│ ├── load_label_png.py
│ └── README.md
├── bbox_detection
│ ├── .readme
│ │ └── annotation.jpg
│ ├── data_annotated
│ │ ├── 2011_000003.jpg
│ │ ├── 2011_000006.jpg
│ │ ├── 2011_000025.jpg
│ │ ├── 2011_000003.json
│ │ ├── 2011_000025.json
│ │ └── 2011_000006.json
│ ├── data_dataset_voc
│ │ ├── JPEGImages
│ │ │ ├── 2011_000003.jpg
│ │ │ ├── 2011_000006.jpg
│ │ │ └── 2011_000025.jpg
│ │ ├── AnnotationsVisualization
│ │ │ ├── 2011_000003.jpg
│ │ │ ├── 2011_000006.jpg
│ │ │ └── 2011_000025.jpg
│ │ ├── class_names.txt
│ │ └── Annotations
│ │   ├── 2011_000003.xml
│ │   ├── 2011_000025.xml
│ │   └── 2011_000006.xml
│ ├── labels.txt
│ ├── README.md
│ └── labelme2voc.py
├── instance_segmentation
│ ├── .readme
│ │ ├── annotation.jpg
│ │ ├── draw_label_png_class.jpg
│ │ └── draw_label_png_object.jpg
│ ├── data_annotated
│ │ ├── 2011_000003.jpg
│ │ ├── 2011_000006.jpg
│ │ ├── 2011_000025.jpg
│ │ ├── 2011_000025.json
│ │ └── 2011_000003.json
│ ├── data_dataset_voc
│ │ ├── JPEGImages
│ │ │ ├── 2011_000003.jpg
│ │ │ ├── 2011_000006.jpg
│ │ │ └── 2011_000025.jpg
│ │ ├── SegmentationClass
│ │ │ ├── 2011_000003.npy
│ │ │ ├── 2011_000006.npy
│ │ │ └── 2011_000025.npy
│ │ ├── SegmentationObject
│ │ │ ├── 2011_000003.npy
│ │ │ ├── 2011_000006.npy
│ │ │ └── 2011_000025.npy
│ │ ├── SegmentationClassPNG
│ │ │ ├── 2011_000003.png
│ │ │ ├── 2011_000006.png
│ │ │ └── 2011_000025.png
│ │ ├── SegmentationObjectPNG
│ │ │ ├── 2011_000003.png
│ │ │ ├── 2011_000006.png
│ │ │ └── 2011_000025.png
│ │ ├── SegmentationClassVisualization
│ │ │ ├── 2011_000003.jpg
│ │ │ ├── 2011_000006.jpg
│ │ │ └── 2011_000025.jpg
│ │ ├── SegmentationObjectVisualization
│ │ │ ├── 2011_000003.jpg
│ │ │ ├── 2011_000006.jpg
│ │ │ └── 2011_000025.jpg
│ │ └── class_names.txt
│ ├── labels.txt
│ ├── README.md
│ └── labelme2voc.py
└── semantic_segmentation
  ├── .readme
  │ ├── annotation.jpg
  │ └── draw_label_png.jpg
  ├── data_annotated
  │ ├── 2011_000003.jpg
  │ ├── 2011_000006.jpg
  │ ├── 2011_000025.jpg
  │ ├── 2011_000025.json
  │ └── 2011_000003.json
  ├── data_dataset_voc
  │ ├── JPEGImages
  │ │ ├── 2011_000003.jpg
  │ │ ├── 2011_000006.jpg
  │ │ └── 2011_000025.jpg
  │ ├── SegmentationClass
  │ │ ├── 2011_000003.npy
  │ │ ├── 2011_000006.npy
  │ │ └── 2011_000025.npy
  │ ├── SegmentationClassPNG
  │ │ ├── 2011_000003.png
  │ │ ├── 2011_000006.png
  │ │ └── 2011_000025.png
  │ ├── SegmentationClassVisualization
  │ │ ├── 2011_000003.jpg
  │ │ ├── 2011_000006.jpg
  │ │ └── 2011_000025.jpg
  │ └── class_names.txt
  ├── labels.txt
  ├── README.md
  └── labelme2voc.py
├── tests
├── data
│ ├── apc2016_obj3.jpg
│ └── apc2016_obj3.json
├── test_app.py
├── widgets_tests
│ └── test_label_dialog.py
└── test_utils.py
├── setup.cfg
├── labelme
├── icons
│ ├── eye.png
│ ├── fit.png
│ ├── new.png
│ ├── close.png
│ ├── color.png
│ ├── copy.png
│ ├── done.png
│ ├── edit.png
│ ├── file.png
│ ├── help.png
│ ├── icon.icns
│ ├── icon.ico
│ ├── icon.png
│ ├── next.png
│ ├── open.png
│ ├── prev.png
│ ├── quit.png
│ ├── save.png
│ ├── undo.png
│ ├── zoom.png
│ ├── cancel.png
│ ├── delete.png
│ ├── expert.png
│ ├── labels.png
│ ├── objects.png
│ ├── save-as.png
│ ├── zoom-in.png
│ ├── zoom-out.png
│ ├── color-line.png
│ ├── fit-width.png
│ ├── fit-window.png
│ ├── undo-cross.png
│ └── feBlend-icon.png
├── cli
│ ├── __init__.py
│ ├── draw_label_png.py
│ ├── draw_json.py
│ ├── json_to_dataset.py
│ └── on_docker.py
├── widgets
│ ├── escapable_qlist_widget.py
│ ├── __init__.py
│ ├── zoom_widget.py
│ ├── tool_bar.py
│ ├── color_dialog.py
│ ├── label_qlist_widget.py
│ └── label_dialog.py
├── _version.py
├── __init__.py
├── utils
│ ├── image.py
│ ├── __init__.py
│ ├── _io.py
│ ├── qt.py
│ ├── shape.py
│ └── draw.py
├── testing.py
├── config
│ ├── default_config.yaml
│ └── __init__.py
├── main.py
├── label_file.py
└── shape.py
├── .gitmodules
├── .gitignore
├── LICENSE
├── docker
└── Dockerfile
├── labelme.spec
├── setup.py
├── .travis.yml
└── README.md
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include README.md
2 |
--------------------------------------------------------------------------------
/examples/classification/flags.txt:
--------------------------------------------------------------------------------
1 | __ignore__
2 | cat
3 | dog
4 |
--------------------------------------------------------------------------------
/tests/data/apc2016_obj3.jpg:
--------------------------------------------------------------------------------
1 | ../../examples/tutorial/apc2016_obj3.jpg
--------------------------------------------------------------------------------
/tests/data/apc2016_obj3.json:
--------------------------------------------------------------------------------
1 | ../../examples/tutorial/apc2016_obj3.json
--------------------------------------------------------------------------------
/examples/video_annotation/labelme2voc.py:
--------------------------------------------------------------------------------
1 | ../semantic_segmentation/labelme2voc.py
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/class_names.txt:
--------------------------------------------------------------------------------
1 | _background_
2 | car
3 | track
--------------------------------------------------------------------------------
/examples/video_annotation/labels.txt:
--------------------------------------------------------------------------------
1 | __ignore__
2 | _background_
3 | car
4 | track
5 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude = .anaconda3/*,.anaconda2/*,venv/*
3 | ignore = H304
4 |
--------------------------------------------------------------------------------
/labelme/icons/eye.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/eye.png
--------------------------------------------------------------------------------
/labelme/icons/fit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/fit.png
--------------------------------------------------------------------------------
/labelme/icons/new.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/new.png
--------------------------------------------------------------------------------
/labelme/icons/close.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/close.png
--------------------------------------------------------------------------------
/labelme/icons/color.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/color.png
--------------------------------------------------------------------------------
/labelme/icons/copy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/copy.png
--------------------------------------------------------------------------------
/labelme/icons/done.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/done.png
--------------------------------------------------------------------------------
/labelme/icons/edit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/edit.png
--------------------------------------------------------------------------------
/labelme/icons/file.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/file.png
--------------------------------------------------------------------------------
/labelme/icons/help.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/help.png
--------------------------------------------------------------------------------
/labelme/icons/icon.icns:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/icon.icns
--------------------------------------------------------------------------------
/labelme/icons/icon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/icon.ico
--------------------------------------------------------------------------------
/labelme/icons/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/icon.png
--------------------------------------------------------------------------------
/labelme/icons/next.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/next.png
--------------------------------------------------------------------------------
/labelme/icons/open.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/open.png
--------------------------------------------------------------------------------
/labelme/icons/prev.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/prev.png
--------------------------------------------------------------------------------
/labelme/icons/quit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/quit.png
--------------------------------------------------------------------------------
/labelme/icons/save.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/save.png
--------------------------------------------------------------------------------
/labelme/icons/undo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/undo.png
--------------------------------------------------------------------------------
/labelme/icons/zoom.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/zoom.png
--------------------------------------------------------------------------------
/labelme/icons/cancel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/cancel.png
--------------------------------------------------------------------------------
/labelme/icons/delete.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/delete.png
--------------------------------------------------------------------------------
/labelme/icons/expert.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/expert.png
--------------------------------------------------------------------------------
/labelme/icons/labels.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/labels.png
--------------------------------------------------------------------------------
/labelme/icons/objects.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/objects.png
--------------------------------------------------------------------------------
/labelme/icons/save-as.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/save-as.png
--------------------------------------------------------------------------------
/labelme/icons/zoom-in.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/zoom-in.png
--------------------------------------------------------------------------------
/labelme/icons/zoom-out.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/zoom-out.png
--------------------------------------------------------------------------------
/labelme/icons/color-line.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/color-line.png
--------------------------------------------------------------------------------
/labelme/icons/fit-width.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/fit-width.png
--------------------------------------------------------------------------------
/labelme/icons/fit-window.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/fit-window.png
--------------------------------------------------------------------------------
/labelme/icons/undo-cross.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/undo-cross.png
--------------------------------------------------------------------------------
/labelme/icons/feBlend-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/labelme/icons/feBlend-icon.png
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "github2pypi"]
2 | path = github2pypi
3 | url = https://github.com/wkentaro/github2pypi.git
4 |
--------------------------------------------------------------------------------
/examples/primitives/primitives.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/primitives/primitives.jpg
--------------------------------------------------------------------------------
/examples/tutorial/apc2016_obj3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/tutorial/apc2016_obj3.jpg
--------------------------------------------------------------------------------
/examples/tutorial/.readme/annotation.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/tutorial/.readme/annotation.jpg
--------------------------------------------------------------------------------
/examples/tutorial/.readme/draw_json.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/tutorial/.readme/draw_json.jpg
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /.cache/
2 | /.pytest_cache/
3 |
4 | /build/
5 | /dist/
6 | /*.egg-info/
7 |
8 | *.py[cdo]
9 |
10 | .DS_Store
11 |
--------------------------------------------------------------------------------
/examples/tutorial/apc2016_obj3_json/img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/tutorial/apc2016_obj3_json/img.png
--------------------------------------------------------------------------------
/examples/bbox_detection/.readme/annotation.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/bbox_detection/.readme/annotation.jpg
--------------------------------------------------------------------------------
/examples/tutorial/.readme/draw_label_png.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/tutorial/.readme/draw_label_png.jpg
--------------------------------------------------------------------------------
/examples/tutorial/apc2016_obj3_json/label.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/tutorial/apc2016_obj3_json/label.png
--------------------------------------------------------------------------------
/examples/video_annotation/.readme/00000100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/.readme/00000100.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/.readme/00000101.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/.readme/00000101.jpg
--------------------------------------------------------------------------------
/examples/classification/data_annotated/0001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/classification/data_annotated/0001.jpg
--------------------------------------------------------------------------------
/examples/classification/data_annotated/0002.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/classification/data_annotated/0002.jpg
--------------------------------------------------------------------------------
/examples/classification/.readme/annotation_cat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/classification/.readme/annotation_cat.jpg
--------------------------------------------------------------------------------
/examples/classification/.readme/annotation_dog.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/classification/.readme/annotation_dog.jpg
--------------------------------------------------------------------------------
/examples/tutorial/apc2016_obj3_json/label_viz.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/tutorial/apc2016_obj3_json/label_viz.png
--------------------------------------------------------------------------------
/examples/instance_segmentation/.readme/annotation.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/.readme/annotation.jpg
--------------------------------------------------------------------------------
/examples/semantic_segmentation/.readme/annotation.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/.readme/annotation.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/.readme/data_annotated.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/.readme/data_annotated.gif
--------------------------------------------------------------------------------
/examples/video_annotation/data_annotated/00000100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_annotated/00000100.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/data_annotated/00000101.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_annotated/00000101.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/data_annotated/00000102.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_annotated/00000102.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/data_annotated/00000103.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_annotated/00000103.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/data_annotated/00000104.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_annotated/00000104.jpg
--------------------------------------------------------------------------------
/examples/bbox_detection/data_annotated/2011_000003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/bbox_detection/data_annotated/2011_000003.jpg
--------------------------------------------------------------------------------
/examples/bbox_detection/data_annotated/2011_000006.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/bbox_detection/data_annotated/2011_000006.jpg
--------------------------------------------------------------------------------
/examples/bbox_detection/data_annotated/2011_000025.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/bbox_detection/data_annotated/2011_000025.jpg
--------------------------------------------------------------------------------
/examples/semantic_segmentation/.readme/draw_label_png.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/.readme/draw_label_png.jpg
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_annotated/2011_000003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_annotated/2011_000003.jpg
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_annotated/2011_000006.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_annotated/2011_000006.jpg
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_annotated/2011_000025.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_annotated/2011_000025.jpg
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_annotated/2011_000003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_annotated/2011_000003.jpg
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_annotated/2011_000006.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_annotated/2011_000006.jpg
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_annotated/2011_000025.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_annotated/2011_000025.jpg
--------------------------------------------------------------------------------
/examples/tutorial/apc2016_obj3_json/label_names.txt:
--------------------------------------------------------------------------------
1 | _background_
2 | shelf
3 | highland_6539_self_stick_notes
4 | mead_index_cards
5 | kong_air_dog_squeakair_tennis_ball
6 |
--------------------------------------------------------------------------------
/labelme/cli/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 | from . import draw_json
4 | from . import draw_label_png
5 | from . import json_to_dataset
6 | from . import on_docker
7 |
--------------------------------------------------------------------------------
/examples/instance_segmentation/.readme/draw_label_png_class.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/.readme/draw_label_png_class.jpg
--------------------------------------------------------------------------------
/examples/instance_segmentation/.readme/draw_label_png_object.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/.readme/draw_label_png_object.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/JPEGImages/00000100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/JPEGImages/00000100.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/JPEGImages/00000101.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/JPEGImages/00000101.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/JPEGImages/00000102.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/JPEGImages/00000102.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/JPEGImages/00000103.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/JPEGImages/00000103.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/JPEGImages/00000104.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/JPEGImages/00000104.jpg
--------------------------------------------------------------------------------
/examples/bbox_detection/data_dataset_voc/JPEGImages/2011_000003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/bbox_detection/data_dataset_voc/JPEGImages/2011_000003.jpg
--------------------------------------------------------------------------------
/examples/bbox_detection/data_dataset_voc/JPEGImages/2011_000006.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/bbox_detection/data_dataset_voc/JPEGImages/2011_000006.jpg
--------------------------------------------------------------------------------
/examples/bbox_detection/data_dataset_voc/JPEGImages/2011_000025.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/bbox_detection/data_dataset_voc/JPEGImages/2011_000025.jpg
--------------------------------------------------------------------------------
/examples/tutorial/apc2016_obj3_json/info.yaml:
--------------------------------------------------------------------------------
1 | label_names:
2 | - _background_
3 | - shelf
4 | - highland_6539_self_stick_notes
5 | - mead_index_cards
6 | - kong_air_dog_squeakair_tennis_ball
7 |
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClass/00000100.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClass/00000100.npy
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClass/00000101.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClass/00000101.npy
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClass/00000102.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClass/00000102.npy
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClass/00000103.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClass/00000103.npy
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClass/00000104.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClass/00000104.npy
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/JPEGImages/2011_000003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/JPEGImages/2011_000003.jpg
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/JPEGImages/2011_000006.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/JPEGImages/2011_000006.jpg
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/JPEGImages/2011_000025.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/JPEGImages/2011_000025.jpg
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_dataset_voc/JPEGImages/2011_000003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_dataset_voc/JPEGImages/2011_000003.jpg
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_dataset_voc/JPEGImages/2011_000006.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_dataset_voc/JPEGImages/2011_000006.jpg
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_dataset_voc/JPEGImages/2011_000025.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_dataset_voc/JPEGImages/2011_000025.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClassPNG/00000100.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClassPNG/00000100.png
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClassPNG/00000101.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClassPNG/00000101.png
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClassPNG/00000102.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClassPNG/00000102.png
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClassPNG/00000103.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClassPNG/00000103.png
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClassPNG/00000104.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClassPNG/00000104.png
--------------------------------------------------------------------------------
/examples/bbox_detection/data_dataset_voc/AnnotationsVisualization/2011_000003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/bbox_detection/data_dataset_voc/AnnotationsVisualization/2011_000003.jpg
--------------------------------------------------------------------------------
/examples/bbox_detection/data_dataset_voc/AnnotationsVisualization/2011_000006.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/bbox_detection/data_dataset_voc/AnnotationsVisualization/2011_000006.jpg
--------------------------------------------------------------------------------
/examples/bbox_detection/data_dataset_voc/AnnotationsVisualization/2011_000025.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/bbox_detection/data_dataset_voc/AnnotationsVisualization/2011_000025.jpg
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationClass/2011_000003.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationClass/2011_000003.npy
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationClass/2011_000006.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationClass/2011_000006.npy
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationClass/2011_000025.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationClass/2011_000025.npy
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_dataset_voc/SegmentationClass/2011_000003.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_dataset_voc/SegmentationClass/2011_000003.npy
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_dataset_voc/SegmentationClass/2011_000006.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_dataset_voc/SegmentationClass/2011_000006.npy
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_dataset_voc/SegmentationClass/2011_000025.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_dataset_voc/SegmentationClass/2011_000025.npy
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationObject/2011_000003.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationObject/2011_000003.npy
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationObject/2011_000006.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationObject/2011_000006.npy
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationObject/2011_000025.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationObject/2011_000025.npy
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationClassPNG/2011_000003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationClassPNG/2011_000003.png
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationClassPNG/2011_000006.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationClassPNG/2011_000006.png
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationClassPNG/2011_000025.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationClassPNG/2011_000025.png
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationObjectPNG/2011_000003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationObjectPNG/2011_000003.png
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationObjectPNG/2011_000006.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationObjectPNG/2011_000006.png
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationObjectPNG/2011_000025.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationObjectPNG/2011_000025.png
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_dataset_voc/SegmentationClassPNG/2011_000003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_dataset_voc/SegmentationClassPNG/2011_000003.png
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_dataset_voc/SegmentationClassPNG/2011_000006.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_dataset_voc/SegmentationClassPNG/2011_000006.png
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_dataset_voc/SegmentationClassPNG/2011_000025.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_dataset_voc/SegmentationClassPNG/2011_000025.png
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClassVisualization/00000100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClassVisualization/00000100.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClassVisualization/00000101.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClassVisualization/00000101.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClassVisualization/00000102.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClassVisualization/00000102.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClassVisualization/00000103.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClassVisualization/00000103.jpg
--------------------------------------------------------------------------------
/examples/video_annotation/data_dataset_voc/SegmentationClassVisualization/00000104.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/video_annotation/data_dataset_voc/SegmentationClassVisualization/00000104.jpg
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationClassVisualization/2011_000003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationClassVisualization/2011_000003.jpg
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationClassVisualization/2011_000006.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationClassVisualization/2011_000006.jpg
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationClassVisualization/2011_000025.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationClassVisualization/2011_000025.jpg
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationObjectVisualization/2011_000003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationObjectVisualization/2011_000003.jpg
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationObjectVisualization/2011_000006.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationObjectVisualization/2011_000006.jpg
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/SegmentationObjectVisualization/2011_000025.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/instance_segmentation/data_dataset_voc/SegmentationObjectVisualization/2011_000025.jpg
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_dataset_voc/SegmentationClassVisualization/2011_000003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_dataset_voc/SegmentationClassVisualization/2011_000003.jpg
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_dataset_voc/SegmentationClassVisualization/2011_000006.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_dataset_voc/SegmentationClassVisualization/2011_000006.jpg
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_dataset_voc/SegmentationClassVisualization/2011_000025.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/D-K-E/labelme/master/examples/semantic_segmentation/data_dataset_voc/SegmentationClassVisualization/2011_000025.jpg
--------------------------------------------------------------------------------
/examples/classification/README.md:
--------------------------------------------------------------------------------
1 | # Classification Example
2 |
3 |
4 | ## Usage
5 |
6 | ```bash
7 | labelme data_annotated --flags flags.txt --nodata
8 | ```
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/examples/bbox_detection/labels.txt:
--------------------------------------------------------------------------------
1 | __ignore__
2 | _background_
3 | aeroplane
4 | bicycle
5 | bird
6 | boat
7 | bottle
8 | bus
9 | car
10 | cat
11 | chair
12 | cow
13 | diningtable
14 | dog
15 | horse
16 | motorbike
17 | person
18 | potted plant
19 | sheep
20 | sofa
21 | train
22 | tv/monitor
--------------------------------------------------------------------------------
/examples/bbox_detection/data_dataset_voc/class_names.txt:
--------------------------------------------------------------------------------
1 | _background_
2 | aeroplane
3 | bicycle
4 | bird
5 | boat
6 | bottle
7 | bus
8 | car
9 | cat
10 | chair
11 | cow
12 | diningtable
13 | dog
14 | horse
15 | motorbike
16 | person
17 | potted plant
18 | sheep
19 | sofa
20 | train
21 | tv/monitor
--------------------------------------------------------------------------------
/examples/instance_segmentation/labels.txt:
--------------------------------------------------------------------------------
1 | __ignore__
2 | _background_
3 | aeroplane
4 | bicycle
5 | bird
6 | boat
7 | bottle
8 | bus
9 | car
10 | cat
11 | chair
12 | cow
13 | diningtable
14 | dog
15 | horse
16 | motorbike
17 | person
18 | potted plant
19 | sheep
20 | sofa
21 | train
22 | tv/monitor
--------------------------------------------------------------------------------
/examples/semantic_segmentation/labels.txt:
--------------------------------------------------------------------------------
1 | __ignore__
2 | _background_
3 | aeroplane
4 | bicycle
5 | bird
6 | boat
7 | bottle
8 | bus
9 | car
10 | cat
11 | chair
12 | cow
13 | diningtable
14 | dog
15 | horse
16 | motorbike
17 | person
18 | potted plant
19 | sheep
20 | sofa
21 | train
22 | tv/monitor
--------------------------------------------------------------------------------
/labelme/widgets/escapable_qlist_widget.py:
--------------------------------------------------------------------------------
1 | from qtpy.QtCore import Qt
2 | from qtpy import QtWidgets
3 |
4 |
class EscapableQListWidget(QtWidgets.QListWidget):
    """QListWidget that clears its selection when Escape is pressed."""

    def keyPressEvent(self, event):
        if event.key() == Qt.Key_Escape:
            self.clearSelection()
        else:
            # BUG FIX: previously all other keys were silently swallowed,
            # disabling default behavior such as arrow-key navigation.
            # Forward them to the base class.
            super(EscapableQListWidget, self).keyPressEvent(event)
10 |
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_dataset_voc/class_names.txt:
--------------------------------------------------------------------------------
1 | _background_
2 | aeroplane
3 | bicycle
4 | bird
5 | boat
6 | bottle
7 | bus
8 | car
9 | cat
10 | chair
11 | cow
12 | diningtable
13 | dog
14 | horse
15 | motorbike
16 | person
17 | potted plant
18 | sheep
19 | sofa
20 | train
21 | tv/monitor
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_dataset_voc/class_names.txt:
--------------------------------------------------------------------------------
1 | _background_
2 | aeroplane
3 | bicycle
4 | bird
5 | boat
6 | bottle
7 | bus
8 | car
9 | cat
10 | chair
11 | cow
12 | diningtable
13 | dog
14 | horse
15 | motorbike
16 | person
17 | potted plant
18 | sheep
19 | sofa
20 | train
21 | tv/monitor
--------------------------------------------------------------------------------
/labelme/_version.py:
--------------------------------------------------------------------------------
# Semantic Versioning 2.0.0: https://semver.org/
# 1. MAJOR version when you make incompatible API changes;
# 2. MINOR version when you add functionality in a backwards-compatible manner;
# 3. PATCH version when you make backwards-compatible bug fixes.
# Single source of truth for the package version (re-exported in labelme/__init__.py).
__version__ = '3.6.16'
6 |
--------------------------------------------------------------------------------
/examples/classification/data_annotated/0001.json:
--------------------------------------------------------------------------------
1 | {
2 | "flags": {
3 | "__ignore__": false,
4 | "cat": true,
5 | "dog": false
6 | },
7 | "shapes": [],
8 | "lineColor": [
9 | 0,
10 | 255,
11 | 0,
12 | 128
13 | ],
14 | "fillColor": [
15 | 255,
16 | 0,
17 | 0,
18 | 128
19 | ],
20 | "imagePath": "0001.jpg",
21 | "imageData": null
22 | }
--------------------------------------------------------------------------------
/examples/classification/data_annotated/0002.json:
--------------------------------------------------------------------------------
1 | {
2 | "flags": {
3 | "__ignore__": false,
4 | "cat": false,
5 | "dog": true
6 | },
7 | "shapes": [],
8 | "lineColor": [
9 | 0,
10 | 255,
11 | 0,
12 | 128
13 | ],
14 | "fillColor": [
15 | 255,
16 | 0,
17 | 0,
18 | 128
19 | ],
20 | "imagePath": "0002.jpg",
21 | "imageData": null
22 | }
--------------------------------------------------------------------------------
/labelme/widgets/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 | from .canvas import Canvas
4 |
5 | from .color_dialog import ColorDialog
6 |
7 | from .escapable_qlist_widget import EscapableQListWidget
8 |
9 | from .label_dialog import LabelDialog
10 | from .label_dialog import LabelQLineEdit
11 |
12 | from .label_qlist_widget import LabelQListWidget
13 |
14 | from .tool_bar import ToolBar
15 |
16 | from .zoom_widget import ZoomWidget
17 |
--------------------------------------------------------------------------------
/examples/video_annotation/README.md:
--------------------------------------------------------------------------------
1 | # Video Annotation Example
2 |
3 |
4 | ## Annotation
5 |
6 | ```bash
7 | labelme data_annotated --labels labels.txt --nodata --keep-prev
8 | ```
9 |
10 |
11 |
12 | *Fig 1. Video annotation example. A frame (left), The next frame (right).*
13 |
14 |
15 |
16 |
17 | *Fig 2. Visualization of video semantic segmentation.*
18 |
--------------------------------------------------------------------------------
/labelme/__init__.py:
--------------------------------------------------------------------------------
# flake8: noqa

import logging
import sys

from qtpy import QT_VERSION


__appname__ = 'labelme'

# True when running on Qt 5 (QT_VERSION is a string like '5.9.2').
QT5 = QT_VERSION[0] == '5'
del QT_VERSION

# Python major-version flags; sys.version starts with '2' or '3'.
PY2 = sys.version[0] == '2'
PY3 = sys.version[0] == '3'
del sys

# Configure logging before importing submodules so they can grab the
# package logger at import time.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__appname__)
del logging


from labelme._version import __version__

from labelme import testing
from labelme import utils
27 |
--------------------------------------------------------------------------------
/labelme/utils/image.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import io
3 |
4 | import numpy as np
5 | import PIL.Image
6 |
7 |
def img_b64_to_arr(img_b64):
    """Decode a base64-encoded image (PNG/JPEG bytes) into a numpy array."""
    buf = io.BytesIO(base64.b64decode(img_b64))
    return np.array(PIL.Image.open(buf))
13 |
14 |
def img_arr_to_b64(img_arr):
    """Encode a numpy image array as base64 PNG bytes."""
    buf = io.BytesIO()
    PIL.Image.fromarray(img_arr).save(buf, format='PNG')
    png_bytes = buf.getvalue()
    # Python 3 uses encodebytes; encodestring is the legacy Python 2 name.
    encoder = getattr(base64, 'encodebytes', None)
    if encoder is None:
        encoder = base64.encodestring
    return encoder(png_bytes)
25 |
--------------------------------------------------------------------------------
/labelme/testing.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os.path as osp
3 |
4 | import labelme.utils
5 |
6 |
def assert_labelfile_sanity(filename):
    """Assert that a labelme JSON annotation file is well-formed.

    Checks that the file exists, that an image can be recovered
    (embedded or referenced via imagePath), and that every shape's
    points lie inside the image bounds.
    """
    assert osp.exists(filename)

    # Close the file handle deterministically instead of json.load(open(...)).
    with open(filename) as f:
        data = json.load(f)

    assert 'imagePath' in data
    imageData = data.get('imageData', None)
    if imageData is None:
        assert osp.exists(data['imagePath'])
        # BUG FIX: previously imageData stayed None here and was passed to
        # img_b64_to_arr, which cannot decode None. Read the referenced
        # image file and base64-encode it so the conversion below works.
        import base64
        with open(data['imagePath'], 'rb') as f:
            imageData = base64.b64encode(f.read())
    img = labelme.utils.img_b64_to_arr(imageData)

    H, W = img.shape[:2]
    assert 'shapes' in data
    for shape in data['shapes']:
        assert 'label' in shape
        assert 'points' in shape
        for x, y in shape['points']:
            assert 0 <= x <= W
            assert 0 <= y <= H
26 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (C) 2016-2018 Kentaro Wada.
2 | Copyright (C) 2011 Michael Pitidis, Hussein Abdulwahid.
3 |
4 | Labelme is free software: you can redistribute it and/or modify
5 | it under the terms of the GNU General Public License as published by
6 | the Free Software Foundation, either version 3 of the License, or
7 | (at your option) any later version.
8 |
9 | Labelme is distributed in the hope that it will be useful,
10 | but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | GNU General Public License for more details.
13 |
14 | You should have received a copy of the GNU General Public License
 15 | along with Labelme. If not, see <http://www.gnu.org/licenses/>.
16 |
--------------------------------------------------------------------------------
/examples/bbox_detection/README.md:
--------------------------------------------------------------------------------
1 | # Bounding Box Detection Example
2 |
3 |
4 | ## Usage
5 |
6 | ```bash
7 | labelme data_annotated --labels labels.txt --nodata --autosave
8 | ```
9 |
10 | 
11 |
12 |
13 | ## Convert to VOC-like Dataset
14 |
15 | ```bash
16 | # It generates:
17 | # - data_dataset_voc/JPEGImages
18 | # - data_dataset_voc/Annotations
19 | # - data_dataset_voc/AnnotationsVisualization
20 | ./labelme2voc.py labels.txt data_annotated data_dataset_voc
21 | ```
22 |
23 |
24 |
25 | Fig1. JPEG image (left), Bounding box annotation visualization (right).
26 |
--------------------------------------------------------------------------------
/labelme/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 | from ._io import lblsave
4 |
5 | from .image import img_arr_to_b64
6 | from .image import img_b64_to_arr
7 |
8 | from .shape import labelme_shapes_to_label
9 | from .shape import masks_to_bboxes
10 | from .shape import polygons_to_mask
11 | from .shape import shape_to_mask
12 | from .shape import shapes_to_label
13 |
14 | from .draw import draw_instances
15 | from .draw import draw_label
16 | from .draw import label_colormap
17 | from .draw import label2rgb
18 |
19 | from .qt import newIcon
20 | from .qt import newButton
21 | from .qt import newAction
22 | from .qt import addActions
23 | from .qt import labelValidator
24 | from .qt import struct
25 | from .qt import distance
26 | from .qt import distancetoline
27 | from .qt import fmtShortcut
28 |
--------------------------------------------------------------------------------
/labelme/widgets/zoom_widget.py:
--------------------------------------------------------------------------------
1 | from qtpy import QtCore
2 | from qtpy import QtGui
3 | from qtpy import QtWidgets
4 |
5 |
class ZoomWidget(QtWidgets.QSpinBox):
    """Spin box displaying the canvas zoom level as a percentage (1-500 %)."""

    def __init__(self, value=100):
        super(ZoomWidget, self).__init__()
        self.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
        self.setRange(1, 500)
        self.setSuffix(' %')
        self.setValue(value)
        self.setToolTip('Zoom Level')
        self.setStatusTip(self.toolTip())
        self.setAlignment(QtCore.Qt.AlignCenter)

    def minimumSizeHint(self):
        """Widen the default hint so the largest possible value fits."""
        base_height = super(ZoomWidget, self).minimumSizeHint().height()
        metrics = QtGui.QFontMetrics(self.font())
        widest_value = str(self.maximum())
        return QtCore.QSize(metrics.width(widest_value), base_height)
23 |
--------------------------------------------------------------------------------
/labelme/utils/_io.py:
--------------------------------------------------------------------------------
1 | import os.path as osp
2 |
3 | import numpy as np
4 | import PIL.Image
5 |
6 | from labelme import logger
7 | from labelme.utils.draw import label_colormap
8 |
9 |
def lblsave(filename, lbl):
    """Save a pixel-wise class label image as an indexed-color (palette) PNG.

    Assumes label values range within [-1, 254] for int32 input and
    [0, 255] for uint8, following the VOC dataset convention. Labels
    outside that range cannot be stored as PNG; a warning is logged and
    nothing is written (use the npy file instead).
    """
    if osp.splitext(filename)[1] != '.png':
        filename += '.png'
    if lbl.min() >= -1 and lbl.max() < 255:
        # Values fit in uint8: store as a paletted PNG with the VOC colormap.
        lbl_pil = PIL.Image.fromarray(lbl.astype(np.uint8), mode='P')
        colormap = label_colormap(255)
        lbl_pil.putpalette((colormap * 255).astype(np.uint8).flatten())
        lbl_pil.save(filename)
    else:
        # logger.warn is a deprecated alias; use logger.warning.
        logger.warning(
            '[%s] Cannot save the pixel-wise class label as PNG, '
            'so please use the npy file.' % filename
        )
25 |
--------------------------------------------------------------------------------
/labelme/cli/draw_label_png.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 |
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | import PIL.Image
7 |
8 | from labelme import utils
9 |
10 |
def main():
    """Print stats for a label PNG file and display it with matplotlib."""
    # BUG FIX: a directly-constructed logging.Logger has no handlers and
    # no parent, so its INFO records were silently dropped (the lastResort
    # handler only emits WARNING and above).  Configure the root logger so
    # the messages actually appear.
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    logger = logging.getLogger('labelme')

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('label_png', help='label PNG file')
    args = parser.parse_args()

    lbl = np.asarray(PIL.Image.open(args.label_png))

    logger.info('label shape: {}'.format(lbl.shape))
    logger.info('unique label values: {}'.format(np.unique(lbl)))

    lbl_viz = utils.draw_label(lbl)
    plt.imshow(lbl_viz)
    plt.show()
28 |
29 |
30 | if __name__ == '__main__':
31 | main()
32 |
--------------------------------------------------------------------------------
/examples/bbox_detection/data_dataset_voc/Annotations/2011_000003.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | 2011_000003.jpg
4 |
5 |
6 |
7 |
8 | 338
9 | 500
10 | 3
11 |
12 |
13 |
25 |
37 |
38 |
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:xenial

# http://fabiorehm.com/blog/2014/09/11/running-gui-apps-with-docker/
# Create a non-root 'developer' user (uid/gid 1000) with passwordless sudo
# so the containerized GUI app can map onto the typical host X11 user.
RUN export uid=1000 gid=1000 && \
    mkdir -p /home/developer && \
    echo "developer:x:${uid}:${gid}:Developer,,,:/home/developer:/bin/bash" >> /etc/passwd && \
    echo "developer:x:${uid}:" >> /etc/group && \
    mkdir -p /etc/sudoers.d && \
    echo "developer ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/developer && \
    chmod 0440 /etc/sudoers.d/developer && \
    chown ${uid}:${gid} -R /home/developer

# Base tooling: git and Python 3 with pip.
RUN \
  apt-get update -qq && \
  apt-get upgrade -qq -y && \
  apt-get install -qq -y \
    git \
    python3 \
    python3-pip

# GUI dependencies installed from apt (PyQt5 wheels are not available on
# xenial's pip for all architectures).
RUN \
  apt-get install -qq -y \
    python3-matplotlib \
    python3-pyqt5

# Install labelme itself from the upstream git repository.
RUN pip3 install -v git+https://github.com/wkentaro/labelme.git

USER developer
ENV HOME /home/developer
30 |
--------------------------------------------------------------------------------
/examples/bbox_detection/data_annotated/2011_000003.json:
--------------------------------------------------------------------------------
1 | {
2 | "flags": {},
3 | "shapes": [
4 | {
5 | "label": "person",
6 | "line_color": null,
7 | "fill_color": null,
8 | "points": [
9 | [
10 | 191,
11 | 107
12 | ],
13 | [
14 | 313,
15 | 329
16 | ]
17 | ],
18 | "shape_type": "rectangle"
19 | },
20 | {
21 | "label": "person",
22 | "line_color": null,
23 | "fill_color": null,
24 | "points": [
25 | [
26 | 365,
27 | 83
28 | ],
29 | [
30 | 500,
31 | 333
32 | ]
33 | ],
34 | "shape_type": "rectangle"
35 | }
36 | ],
37 | "lineColor": [
38 | 0,
39 | 255,
40 | 0,
41 | 128
42 | ],
43 | "fillColor": [
44 | 255,
45 | 0,
46 | 0,
47 | 128
48 | ],
49 | "imagePath": "2011_000003.jpg",
50 | "imageData": null
51 | }
--------------------------------------------------------------------------------
/examples/tutorial/load_label_png.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from __future__ import print_function
4 |
5 | import os.path as osp
6 |
7 | import numpy as np
8 | import PIL.Image
9 |
10 |
11 | here = osp.dirname(osp.abspath(__file__))
12 |
13 |
def main():
    """Load label.png and print each unique label value with its name."""
    label_png = osp.join(here, 'apc2016_obj3_json/label.png')
    print('Loading:', label_png)
    print()

    lbl = np.asarray(PIL.Image.open(label_png))
    labels = np.unique(lbl)

    label_names_txt = osp.join(here, 'apc2016_obj3_json/label_names.txt')
    # BUG FIX: close the file deterministically (previously the handle was
    # opened inside the comprehension and left to the garbage collector).
    with open(label_names_txt) as f:
        label_names = [name.strip() for name in f]
    print('# of labels:', len(labels))
    print('# of label_names:', len(label_names))
    if len(labels) != len(label_names):
        print('Number of unique labels and label_names must be same.')
        # BUG FIX: quit() is a site-module convenience that may not exist
        # in frozen or -S interpreters; SystemExit is always available.
        raise SystemExit(1)
    print()

    print('label: label_name')
    for label, label_name in zip(labels, label_names):
        print('%d: %s' % (label, label_name))
34 |
35 |
36 | if __name__ == '__main__':
37 | main()
38 |
--------------------------------------------------------------------------------
/labelme.spec:
--------------------------------------------------------------------------------
# -*- mode: python -*-
# vim: ft=python

# PyInstaller build specification for the labelme application.
# NOTE: Analysis, PYZ, EXE and BUNDLE are injected into this file's
# namespace by PyInstaller at build time; they are not Python imports.

block_cipher = None


a = Analysis(
    ['labelme/main.py'],
    pathex=['labelme'],
    binaries=[],
    # Non-code resources bundled into the executable: the default config
    # and the icon images.
    datas=[
        ('labelme/config/default_config.yaml', 'labelme/config'),
        ('labelme/icons/*', 'labelme/icons'),
    ],
    hiddenimports=[],
    hookspath=[],
    runtime_hooks=[],
    # matplotlib is excluded to keep the bundle small.
    excludes=['matplotlib'],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    a.zipfiles,
    a.datas,
    name='labelme',
    debug=False,
    strip=False,
    upx=True,
    runtime_tmpdir=None,
    # GUI application: no console window on Windows.
    console=False,
    icon='labelme/icons/icon.ico',
)
# macOS .app bundle wrapping the executable above.
app = BUNDLE(
    exe,
    name='labelme.app',
    icon='labelme/icons/icon.icns',
    bundle_identifier=None,
    info_plist={'NSHighResolutionCapable': 'True'},
)
46 |
--------------------------------------------------------------------------------
/examples/bbox_detection/data_dataset_voc/Annotations/2011_000025.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | 2011_000025.jpg
4 |
5 |
6 |
7 |
8 | 375
9 | 500
10 | 3
11 |
12 |
13 |
25 |
37 |
49 |
50 |
--------------------------------------------------------------------------------
/examples/semantic_segmentation/README.md:
--------------------------------------------------------------------------------
1 | # Semantic Segmentation Example
2 |
3 | ## Annotation
4 |
5 | ```bash
6 | labelme data_annotated --labels labels.txt --nodata
7 | ```
8 |
9 | 
10 |
11 |
12 | ## Convert to VOC-like Dataset
13 |
14 | ```bash
15 | # It generates:
16 | # - data_dataset_voc/JPEGImages
17 | # - data_dataset_voc/SegmentationClass
18 | # - data_dataset_voc/SegmentationClassVisualization
19 | ./labelme2voc.py labels.txt data_annotated data_dataset_voc
20 | ```
21 |
22 |
23 |
24 | Fig 1. JPEG image (left), PNG label (center), JPEG label visualization (right)
25 |
26 |
27 | Note that the label file contains only very low label values (ex. `0, 4, 14`), and
28 | `255` indicates the `__ignore__` label value (`-1` in the npy file).
You can view the label PNG file by running the following command.
30 |
31 | ```bash
32 | labelme_draw_label_png data_dataset_voc/SegmentationClassPNG/2011_000003.png
33 | ```
34 |
35 |
36 |
--------------------------------------------------------------------------------
/labelme/widgets/tool_bar.py:
--------------------------------------------------------------------------------
1 | from qtpy import QtCore
2 | from qtpy import QtWidgets
3 |
4 |
class ToolBar(QtWidgets.QToolBar):
    """Frameless toolbar whose layout uses no spacing or margins."""

    def __init__(self, title):
        super(ToolBar, self).__init__(title)
        margins = (0, 0, 0, 0)
        layout = self.layout()
        layout.setSpacing(0)
        layout.setContentsMargins(*margins)
        self.setContentsMargins(*margins)
        self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint)

    def addAction(self, action):
        """Add *action*, wrapping plain actions in a ToolButton."""
        if isinstance(action, QtWidgets.QWidgetAction):
            return super(ToolBar, self).addAction(action)
        button = ToolButton()
        button.setDefaultAction(action)
        button.setToolButtonStyle(self.toolButtonStyle())
        self.addWidget(button)
23 |
24 |
class ToolButton(QtWidgets.QToolButton):

    """ToolBar companion class which ensures all buttons have the same size."""

    # Starting minimum (width, height); grows as larger hints are seen.
    minSize = (60, 60)

    def minimumSizeHint(self):
        hint = super(ToolButton, self).minimumSizeHint()
        min_w, min_h = self.minSize
        # Ratchet the remembered minimum upward so the button never
        # shrinks below the largest hint observed so far.
        self.minSize = max(hint.width(), min_w), max(hint.height(), min_h)
        return QtCore.QSize(*self.minSize)
37 |
--------------------------------------------------------------------------------
/examples/bbox_detection/data_annotated/2011_000025.json:
--------------------------------------------------------------------------------
1 | {
2 | "flags": {},
3 | "shapes": [
4 | {
5 | "label": "bus",
6 | "line_color": null,
7 | "fill_color": null,
8 | "points": [
9 | [
10 | 84,
11 | 20
12 | ],
13 | [
14 | 435,
15 | 373
16 | ]
17 | ],
18 | "shape_type": "rectangle"
19 | },
20 | {
21 | "label": "bus",
22 | "line_color": null,
23 | "fill_color": null,
24 | "points": [
25 | [
26 | 1,
27 | 99
28 | ],
29 | [
30 | 107,
31 | 282
32 | ]
33 | ],
34 | "shape_type": "rectangle"
35 | },
36 | {
37 | "label": "car",
38 | "line_color": null,
39 | "fill_color": null,
40 | "points": [
41 | [
42 | 409,
43 | 167
44 | ],
45 | [
46 | 500,
47 | 266
48 | ]
49 | ],
50 | "shape_type": "rectangle"
51 | }
52 | ],
53 | "lineColor": [
54 | 0,
55 | 255,
56 | 0,
57 | 128
58 | ],
59 | "fillColor": [
60 | 255,
61 | 0,
62 | 0,
63 | 128
64 | ],
65 | "imagePath": "2011_000025.jpg",
66 | "imageData": null
67 | }
--------------------------------------------------------------------------------
/labelme/widgets/color_dialog.py:
--------------------------------------------------------------------------------
1 | from qtpy import QtWidgets
2 |
3 |
class ColorDialog(QtWidgets.QColorDialog):
    """Color picker with alpha support and a Restore Defaults button."""

    def __init__(self, parent=None):
        super(ColorDialog, self).__init__(parent)
        self.setOption(QtWidgets.QColorDialog.ShowAlphaChannel)
        # The Mac native dialog does not support our restore button.
        self.setOption(QtWidgets.QColorDialog.DontUseNativeDialog)
        # The default color is supplied per invocation so one dialog can
        # serve different elements.
        self.default = None
        self.bb = self.layout().itemAt(1).widget()
        self.bb.addButton(QtWidgets.QDialogButtonBox.RestoreDefaults)
        self.bb.clicked.connect(self.checkRestore)

    def getColor(self, value=None, title=None, default=None):
        """Show the dialog; return the chosen color, or None on cancel."""
        self.default = default
        if title:
            self.setWindowTitle(title)
        if value:
            self.setCurrentColor(value)
        if self.exec_():
            return self.currentColor()
        return None

    def checkRestore(self, button):
        """Reset to the stored default when Restore Defaults is clicked."""
        is_reset = self.bb.buttonRole(button) & \
            QtWidgets.QDialogButtonBox.ResetRole
        if is_reset and self.default:
            self.setCurrentColor(self.default)
31 |
--------------------------------------------------------------------------------
/examples/bbox_detection/data_dataset_voc/Annotations/2011_000006.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | 2011_000006.jpg
4 |
5 |
6 |
7 |
8 | 375
9 | 500
10 | 3
11 |
12 |
13 |
25 |
37 |
49 |
61 |
62 |
--------------------------------------------------------------------------------
/examples/instance_segmentation/README.md:
--------------------------------------------------------------------------------
1 | # Instance Segmentation Example
2 |
3 | ## Annotation
4 |
5 | ```bash
6 | labelme data_annotated --labels labels.txt --nodata
7 | ```
8 |
9 | 
10 |
11 | ## Convert to VOC-like Dataset
12 |
13 | ```bash
14 | # It generates:
15 | # - data_dataset_voc/JPEGImages
16 | # - data_dataset_voc/SegmentationClass
17 | # - data_dataset_voc/SegmentationClassVisualization
18 | # - data_dataset_voc/SegmentationObject
19 | # - data_dataset_voc/SegmentationObjectVisualization
20 | ./labelme2voc.py labels.txt data_annotated data_dataset_voc
21 | ```
22 |
23 |
24 | Fig 1. JPEG image (left), JPEG class label visualization (center), JPEG instance label visualization (right)
25 |
26 |
27 | Note that the label file contains only very low label values (ex. `0, 4, 14`), and
28 | `255` indicates the `__ignore__` label value (`-1` in the npy file).
You can view the label PNG files by running the following commands.
30 |
31 | ```bash
32 | labelme_draw_label_png data_dataset_voc/SegmentationClassPNG/2011_000003.png # left
33 | labelme_draw_label_png data_dataset_voc/SegmentationObjectPNG/2011_000003.png # right
34 | ```
35 |
36 |
37 |
--------------------------------------------------------------------------------
/labelme/widgets/label_qlist_widget.py:
--------------------------------------------------------------------------------
1 | from qtpy import QtWidgets
2 |
3 |
class LabelQListWidget(QtWidgets.QListWidget):
    """List widget maintaining a bidirectional item <-> shape association."""

    def __init__(self, *args, **kwargs):
        super(LabelQListWidget, self).__init__(*args, **kwargs)
        self.canvas = None
        # List of (item, shape) pairs; linear identity scans keep the
        # association even for equal-comparing items/shapes.
        self.itemsToShapes = []

    def get_shape_from_item(self, item):
        """Return the shape paired with *item* (identity match), or None."""
        # FIX: dropped the unused enumerate() index from the loop.
        for item_, shape in self.itemsToShapes:
            if item_ is item:
                return shape

    def get_item_from_shape(self, shape):
        """Return the item paired with *shape* (identity match), or None."""
        for item, shape_ in self.itemsToShapes:
            if shape_ is shape:
                return item

    def clear(self):
        """Remove all items and forget the item <-> shape pairs."""
        super(LabelQListWidget, self).clear()
        self.itemsToShapes = []

    def setParent(self, parent):
        # NOTE: intentionally shadows QWidget.setParent()/parent(); the
        # app window is stored so dropEvent can call parent.setDirty().
        self.parent = parent

    def dropEvent(self, event):
        """Propagate a drag-reorder of items to the canvas shapes."""
        shapes = self.shapes
        super(LabelQListWidget, self).dropEvent(event)
        if self.shapes == shapes:
            return
        if self.canvas is None:
            raise RuntimeError('self.canvas must be set beforehand.')
        self.parent.setDirty()
        self.canvas.loadShapes(self.shapes)

    @property
    def shapes(self):
        """Shapes in current list order."""
        return [self.get_shape_from_item(self.item(i))
                for i in range(self.count())]
46 |
--------------------------------------------------------------------------------
/examples/bbox_detection/data_annotated/2011_000006.json:
--------------------------------------------------------------------------------
1 | {
2 | "flags": {},
3 | "shapes": [
4 | {
5 | "label": "person",
6 | "line_color": null,
7 | "fill_color": null,
8 | "points": [
9 | [
10 | 91,
11 | 107
12 | ],
13 | [
14 | 240,
15 | 330
16 | ]
17 | ],
18 | "shape_type": "rectangle"
19 | },
20 | {
21 | "label": "person",
22 | "line_color": null,
23 | "fill_color": null,
24 | "points": [
25 | [
26 | 178,
27 | 110
28 | ],
29 | [
30 | 298,
31 | 282
32 | ]
33 | ],
34 | "shape_type": "rectangle"
35 | },
36 | {
37 | "label": "person",
38 | "line_color": null,
39 | "fill_color": null,
40 | "points": [
41 | [
42 | 254,
43 | 115
44 | ],
45 | [
46 | 369,
47 | 292
48 | ]
49 | ],
50 | "shape_type": "rectangle"
51 | },
52 | {
53 | "label": "person",
54 | "line_color": null,
55 | "fill_color": null,
56 | "points": [
57 | [
58 | 395,
59 | 81
60 | ],
61 | [
62 | 447,
63 | 117
64 | ]
65 | ],
66 | "shape_type": "rectangle"
67 | }
68 | ],
69 | "lineColor": [
70 | 0,
71 | 255,
72 | 0,
73 | 128
74 | ],
75 | "fillColor": [
76 | 255,
77 | 0,
78 | 0,
79 | 128
80 | ],
81 | "imagePath": "2011_000006.jpg",
82 | "imageData": null
83 | }
--------------------------------------------------------------------------------
/labelme/config/default_config.yaml:
--------------------------------------------------------------------------------
1 | auto_save: false
2 | display_label_popup: true
3 | store_data: true
4 | keep_prev: false
5 |
6 | flags: null
7 | labels: null
8 | file_search: null
9 | sort_labels: true
10 | validate_label: null
11 |
12 | # main
13 | flag_dock:
14 | show: true
15 | closable: true
16 | movable: true
17 | floatable: true
18 | label_dock:
19 | show: true
20 | closable: true
21 | movable: true
22 | floatable: true
23 | shape_dock:
24 | show: true
25 | closable: true
26 | movable: true
27 | floatable: true
28 | file_dock:
29 | show: true
30 | closable: true
31 | movable: true
32 | floatable: true
33 |
34 | # label_dialog
35 | show_label_text_field: true
36 | label_completion: startswith
37 | fit_to_content:
38 | column: true
39 | row: false
40 |
41 | epsilon: 11.0
42 |
43 | shortcuts:
44 | close: Ctrl+W
45 | open: Ctrl+O
46 | open_dir: Ctrl+U
47 | quit: Ctrl+Q
48 | save: Ctrl+S
49 | save_as: Ctrl+Shift+S
50 | save_to: null
51 |
52 | open_next: [D, Ctrl+Shift+D]
53 | open_prev: [A, Ctrl+Shift+A]
54 |
55 | zoom_in: [Ctrl++, Ctrl+=]
56 | zoom_out: Ctrl+-
57 | zoom_to_original: Ctrl+0
58 | fit_window: Ctrl+F
59 | fit_width: Ctrl+Shift+F
60 |
61 | add_point: Ctrl+Shift+P
62 | create_polygon: Ctrl+N
63 | create_rectangle: Ctrl+R
64 | create_circle: null
65 | create_line: null
66 | create_point: null
67 | create_linestrip: null
68 | edit_polygon: Ctrl+J
69 | delete_polygon: Delete
70 | duplicate_polygon: Ctrl+D
71 | undo: Ctrl+Z
72 | undo_last_point: [Ctrl+Z, Backspace]
73 | edit_label: Ctrl+E
74 | edit_line_color: Ctrl+L
75 | edit_fill_color: Ctrl+Shift+L
76 | toggle_keep_prev_mode: Ctrl+P
77 |
--------------------------------------------------------------------------------
/tests/test_app.py:
--------------------------------------------------------------------------------
1 | import os.path as osp
2 | import shutil
3 | import tempfile
4 |
5 | import labelme.app
6 | import labelme.config
7 | import labelme.testing
8 |
9 |
10 | here = osp.dirname(osp.abspath(__file__))
11 | data_dir = osp.join(here, 'data')
12 |
13 |
def test_MainWindow_open(qtbot):
    """The main window can be created, shown, and closed without errors."""
    window = labelme.app.MainWindow()
    qtbot.addWidget(window)
    window.show()
    window.close()
19 |
20 |
def test_MainWindow_open_json(qtbot):
    """The main window opens a known-good label JSON file."""
    json_file = osp.join(data_dir, 'apc2016_obj3.json')
    labelme.testing.assert_labelfile_sanity(json_file)
    window = labelme.app.MainWindow(filename=json_file)
    qtbot.addWidget(window)
    window.show()
    window.close()
28 |
29 |
def test_MainWindow_annotate_jpg(qtbot):
    """End-to-end: load a jpg, add a polygon label, save, validate output."""
    tmp_dir = tempfile.mkdtemp()
    try:
        filename = osp.join(tmp_dir, 'apc2016_obj3.jpg')
        shutil.copy(osp.join(data_dir, 'apc2016_obj3.jpg'),
                    filename)
        output = osp.join(tmp_dir, 'apc2016_obj3.json')

        config = labelme.config.get_default_config()
        win = labelme.app.MainWindow(
            config=config, filename=filename, output=output)
        qtbot.addWidget(win)
        win.show()

        def check_imageData():
            assert hasattr(win, 'imageData')
            assert win.imageData is not None

        qtbot.waitUntil(check_imageData)  # wait for loadFile

        label = 'shelf'
        points = [
            (26, 70),
            (176, 730),
            (986, 742),
            (1184, 102),
        ]
        shape = label, points, None, None, 'polygon'
        shapes = [shape]
        win.loadLabels(shapes)
        win.saveFile()

        labelme.testing.assert_labelfile_sanity(output)
    finally:
        # FIX: the mkdtemp directory was leaked on every test run.
        shutil.rmtree(tmp_dir, ignore_errors=True)
62 |
--------------------------------------------------------------------------------
/labelme/cli/draw_json.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import argparse
4 | import base64
5 | import json
6 | import os
7 | import sys
8 |
9 | import matplotlib.pyplot as plt
10 |
11 | from labelme import utils
12 |
13 |
14 | PY2 = sys.version_info[0] == 2
15 |
16 |
def main():
    """Visualize a labelme JSON file: image and label drawn side by side."""
    parser = argparse.ArgumentParser()
    parser.add_argument('json_file')
    args = parser.parse_args()

    json_file = args.json_file

    # FIX: close the file deterministically (json.load(open(...)) left the
    # handle to the garbage collector).
    with open(json_file) as f:
        data = json.load(f)

    if data['imageData']:
        imageData = data['imageData']
    else:
        # imageData omitted (--nodata): read the referenced image file.
        imagePath = os.path.join(os.path.dirname(json_file), data['imagePath'])
        with open(imagePath, 'rb') as f:
            imageData = f.read()
        imageData = base64.b64encode(imageData).decode('utf-8')
    img = utils.img_b64_to_arr(imageData)

    # Assign consecutive integer values to label names; 0 is background.
    label_name_to_value = {'_background_': 0}
    for shape in sorted(data['shapes'], key=lambda x: x['label']):
        label_name = shape['label']
        if label_name in label_name_to_value:
            label_value = label_name_to_value[label_name]
        else:
            label_value = len(label_name_to_value)
            label_name_to_value[label_name] = label_value
    lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

    label_names = [None] * (max(label_name_to_value.values()) + 1)
    for name, value in label_name_to_value.items():
        label_names[value] = name
    lbl_viz = utils.draw_label(lbl, img, label_names)

    plt.subplot(121)
    plt.imshow(img)
    plt.subplot(122)
    plt.imshow(lbl_viz)
    plt.show()
55 |
56 |
57 | if __name__ == '__main__':
58 | main()
59 |
--------------------------------------------------------------------------------
/examples/tutorial/README.md:
--------------------------------------------------------------------------------
1 | # Tutorial (Single Image Example)
2 |
3 | ## Annotation
4 |
5 | ```bash
6 | labelme apc2016_obj3.jpg -O apc2016_obj3.json
7 | ```
8 |
9 | 
10 |
11 |
12 | ## Visualization
13 |
14 | To view the json file quickly, you can use utility script:
15 |
16 | ```bash
17 | labelme_draw_json apc2016_obj3.json
18 | ```
19 |
20 |
21 |
22 |
23 | ## Convert to Dataset
24 |
25 | To convert the json to set of image and label, you can run following:
26 |
27 |
28 | ```bash
29 | labelme_json_to_dataset apc2016_obj3.json -o apc2016_obj3_json
30 | ```
31 |
32 | It generates standard files from the JSON file.
33 |
34 | - [img.png](apc2016_obj3_json/img.png): Image file.
35 | - [label.png](apc2016_obj3_json/label.png): uint8 label file.
36 | - [label_viz.png](apc2016_obj3_json/label_viz.png): Visualization of `label.png`.
37 | - [label_names.txt](apc2016_obj3_json/label_names.txt): Label names for values in `label.png`.
38 |
39 | ## How to load label PNG file?
40 |
41 | Note that loading `label.png` is a bit difficult
42 | (`scipy.misc.imread`, `skimage.io.imread` may not work correctly),
43 | and please use `PIL.Image.open` to avoid unexpected behavior:
44 |
45 | ```python
46 | # see load_label_png.py also.
47 | >>> import numpy as np
48 | >>> import PIL.Image
49 |
50 | >>> label_png = 'apc2016_obj3_json/label.png'
51 | >>> lbl = np.asarray(PIL.Image.open(label_png))
52 | >>> print(lbl.dtype)
53 | dtype('uint8')
54 | >>> np.unique(lbl)
55 | array([0, 1, 2, 3], dtype=uint8)
56 | >>> lbl.shape
57 | (907, 1210)
58 | ```
59 |
60 | Also, you can see the label PNG file by:
61 |
```bash
63 | labelme_draw_label_png apc2016_obj3_json/label.png
64 | ```
65 |
66 |
67 |
--------------------------------------------------------------------------------
/labelme/config/__init__.py:
--------------------------------------------------------------------------------
1 | import os.path as osp
2 | import shutil
3 |
4 | import yaml
5 |
6 | from labelme import logger
7 |
8 |
9 | here = osp.dirname(osp.abspath(__file__))
10 |
11 |
def update_dict(target_dict, new_dict, validate_item=None):
    """Recursively merge *new_dict* into *target_dict* in place.

    Keys missing from *target_dict* are skipped with a warning; nested
    dicts are merged recursively; when given, *validate_item* is called
    as validate_item(key, value) for every incoming pair.
    """
    for key, value in new_dict.items():
        if validate_item:
            validate_item(key, value)
        if key not in target_dict:
            logger.warn('Skipping unexpected key in config: {}'
                        .format(key))
            continue
        both_dicts = (isinstance(target_dict[key], dict) and
                      isinstance(value, dict))
        if both_dicts:
            update_dict(target_dict[key], value, validate_item=validate_item)
        else:
            target_dict[key] = value
25 |
26 |
27 | # -----------------------------------------------------------------------------
28 |
29 |
def get_default_config():
    """Load the bundled default config; seed ~/.labelmerc on first run.

    Returns the parsed configuration dict.
    """
    config_file = osp.join(here, 'default_config.yaml')
    with open(config_file) as f:
        # FIX: yaml.load without an explicit Loader is deprecated and can
        # construct arbitrary Python objects; safe_load is sufficient here.
        config = yaml.safe_load(f)

    # Save default config to ~/.labelmerc (best effort only).
    user_config_file = osp.join(osp.expanduser('~'), '.labelmerc')
    if not osp.exists(user_config_file):
        try:
            shutil.copy(config_file, user_config_file)
        except Exception:
            logger.warn('Failed to save config: {}'.format(user_config_file))

    return config
44 |
45 |
def validate_config_item(key, value):
    """Raise ValueError for config entries with unsupported values."""
    allowed_validate_label = (None, 'exact', 'instance')
    if key == 'validate_label' and value not in allowed_validate_label:
        raise ValueError('Unexpected value `{}` for key `{}`'
                         .format(value, key))
50 |
51 |
def get_config(config_from_args=None, config_file=None):
    """Build the effective configuration dict.

    Load order (later entries override earlier ones):

    1. default config (lowest priority)
    2. config file passed by command line argument or ~/.labelmerc
    3. command line argument (highest priority)
    """
    # 1. default config
    config = get_default_config()

    # 2. config from yaml file
    if config_file is not None and osp.exists(config_file):
        with open(config_file) as f:
            # FIX: yaml.load without a Loader is deprecated and unsafe on
            # untrusted input; safe_load handles plain config files.
            user_config = yaml.safe_load(f) or {}
        update_dict(config, user_config, validate_item=validate_config_item)

    # 3. command line argument
    if config_from_args is not None:
        update_dict(config, config_from_args,
                    validate_item=validate_config_item)

    return config
74 |
--------------------------------------------------------------------------------
/labelme/utils/qt.py:
--------------------------------------------------------------------------------
1 | from math import sqrt
2 | import os.path as osp
3 |
4 | import numpy as np
5 |
6 | from qtpy import QtCore
7 | from qtpy import QtGui
8 | from qtpy import QtWidgets
9 |
10 |
11 | here = osp.dirname(osp.abspath(__file__))
12 |
13 |
def newIcon(icon):
    """Return a QIcon for *icon*, a basename in labelme/icons (no .png)."""
    icons_dir = osp.join(here, '../icons')
    path = osp.join(':/', icons_dir, '%s.png' % icon)
    return QtGui.QIcon(path)
17 |
18 |
def newButton(text, icon=None, slot=None):
    """Create a QPushButton, optionally with an icon and a clicked slot."""
    button = QtWidgets.QPushButton(text)
    if icon is not None:
        button.setIcon(newIcon(icon))
    if slot is not None:
        button.clicked.connect(slot)
    return button
26 |
27 |
def newAction(parent, text, slot=None, shortcut=None, icon=None,
              tip=None, checkable=False, enabled=True):
    """Create a new action and assign callbacks, shortcuts, etc."""
    action = QtWidgets.QAction(text, parent)
    if icon is not None:
        action.setIconText(text.replace(' ', '\n'))
        action.setIcon(newIcon(icon))
    if shortcut is not None:
        # A list/tuple installs several alternative shortcuts at once.
        if isinstance(shortcut, (list, tuple)):
            action.setShortcuts(shortcut)
        else:
            action.setShortcut(shortcut)
    if tip is not None:
        action.setToolTip(tip)
        action.setStatusTip(tip)
    if slot is not None:
        action.triggered.connect(slot)
    if checkable:
        action.setCheckable(True)
    action.setEnabled(enabled)
    return action
49 |
50 |
def addActions(widget, actions):
    """Add each entry to *widget*: None -> separator, QMenu -> submenu."""
    for entry in actions:
        if entry is None:
            widget.addSeparator()
        elif isinstance(entry, QtWidgets.QMenu):
            widget.addMenu(entry)
        else:
            widget.addAction(entry)
59 |
60 |
def labelValidator():
    """Validator rejecting labels that begin with a space or a tab."""
    pattern = QtCore.QRegExp(r'^[^ \t].+')
    return QtGui.QRegExpValidator(pattern, None)
63 |
64 |
class struct(object):
    """Simple record type: struct(a=1, b=2) yields an object with .a, .b."""

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
68 |
69 |
def distance(p):
    """Euclidean distance of point *p* (with .x()/.y()) from the origin."""
    return sqrt(p.x() ** 2 + p.y() ** 2)
72 |
73 |
def distancetoline(point, line):
    """Distance from *point* to the line segment *line* (pair of points)."""
    start, end = line
    a = np.array([start.x(), start.y()])
    b = np.array([end.x(), end.y()])
    p = np.array([point.x(), point.y()])
    # Projection falls before the segment start: nearest point is `a`.
    if np.dot(p - a, b - a) < 0:
        return np.linalg.norm(p - a)
    # Projection falls past the segment end: nearest point is `b`.
    if np.dot(p - b, a - b) < 0:
        return np.linalg.norm(p - b)
    # Otherwise: perpendicular distance to the infinite line through a, b.
    return np.linalg.norm(np.cross(b - a, a - p)) / np.linalg.norm(b - a)
84 |
85 |
def fmtShortcut(text):
    """Split a shortcut such as 'Ctrl+S' at its first '+' and re-join it."""
    modifier, key = text.split('+', 1)
    return '%s+%s' % (modifier, key)
89 |
--------------------------------------------------------------------------------
/examples/primitives/primitives.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "3.5.0",
3 | "flags": {},
4 | "shapes": [
5 | {
6 | "label": "rectangle",
7 | "line_color": null,
8 | "fill_color": null,
9 | "points": [
10 | [
11 | 32,
12 | 35
13 | ],
14 | [
15 | 132,
16 | 135
17 | ]
18 | ],
19 | "shape_type": "rectangle"
20 | },
21 | {
22 | "label": "circle",
23 | "line_color": null,
24 | "fill_color": null,
25 | "points": [
26 | [
27 | 195,
28 | 84
29 | ],
30 | [
31 | 225,
32 | 125
33 | ]
34 | ],
35 | "shape_type": "circle"
36 | },
37 | {
38 | "label": "rectangle",
39 | "line_color": null,
40 | "fill_color": null,
41 | "points": [
42 | [
43 | 391,
44 | 33
45 | ],
46 | [
47 | 542,
48 | 135
49 | ]
50 | ],
51 | "shape_type": "rectangle"
52 | },
53 | {
54 | "label": "polygon",
55 | "line_color": null,
56 | "fill_color": null,
57 | "points": [
58 | [
59 | 69,
60 | 318
61 | ],
62 | [
63 | 45,
64 | 403
65 | ],
66 | [
67 | 173,
68 | 406
69 | ],
70 | [
71 | 198,
72 | 321
73 | ]
74 | ],
75 | "shape_type": "polygon"
76 | },
77 | {
78 | "label": "line",
79 | "line_color": null,
80 | "fill_color": null,
81 | "points": [
82 | [
83 | 188,
84 | 178
85 | ],
86 | [
87 | 160,
88 | 224
89 | ]
90 | ],
91 | "shape_type": "line"
92 | },
93 | {
94 | "label": "point",
95 | "line_color": null,
96 | "fill_color": null,
97 | "points": [
98 | [
99 | 345,
100 | 174
101 | ]
102 | ],
103 | "shape_type": "point"
104 | },
105 | {
106 | "label": "line_strip",
107 | "line_color": null,
108 | "fill_color": null,
109 | "points": [
110 | [
111 | 441,
112 | 181
113 | ],
114 | [
115 | 403,
116 | 274
117 | ],
118 | [
119 | 545,
120 | 275
121 | ]
122 | ],
123 | "shape_type": "linestrip"
124 | }
125 | ],
126 | "lineColor": [
127 | 0,
128 | 255,
129 | 0,
130 | 128
131 | ],
132 | "fillColor": [
133 | 255,
134 | 0,
135 | 0,
136 | 128
137 | ],
138 | "imagePath": "primitives.jpg",
139 | "imageData": null
140 | }
--------------------------------------------------------------------------------
/labelme/cli/json_to_dataset.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import base64
3 | import json
4 | import os
5 | import os.path as osp
6 | import warnings
7 |
8 | import PIL.Image
9 | import yaml
10 |
11 | from labelme import utils
12 |
13 |
def main():
    """Convert one labelme JSON file into an image + label dataset dir."""
    warnings.warn("This script is aimed to demonstrate how to convert the\n"
                  "JSON file to a single image dataset, and not to handle\n"
                  "multiple JSON files to generate a real-use dataset.")

    parser = argparse.ArgumentParser()
    parser.add_argument('json_file')
    parser.add_argument('-o', '--out', default=None)
    args = parser.parse_args()

    json_file = args.json_file

    # Default output dir: <json dir>/<json basename with dots replaced>.
    if args.out is None:
        out_dir = osp.basename(json_file).replace('.', '_')
        out_dir = osp.join(osp.dirname(json_file), out_dir)
    else:
        out_dir = args.out
    if not osp.exists(out_dir):
        os.mkdir(out_dir)

    # FIX: close the file deterministically (json.load(open(...)) left
    # the handle to the garbage collector).
    with open(json_file) as f:
        data = json.load(f)

    if data['imageData']:
        imageData = data['imageData']
    else:
        # imageData omitted (--nodata): read the referenced image file.
        imagePath = os.path.join(os.path.dirname(json_file), data['imagePath'])
        with open(imagePath, 'rb') as f:
            imageData = f.read()
        imageData = base64.b64encode(imageData).decode('utf-8')
    img = utils.img_b64_to_arr(imageData)

    # Assign consecutive integer values to label names; 0 is background.
    label_name_to_value = {'_background_': 0}
    for shape in sorted(data['shapes'], key=lambda x: x['label']):
        label_name = shape['label']
        if label_name in label_name_to_value:
            label_value = label_name_to_value[label_name]
        else:
            label_value = len(label_name_to_value)
            label_name_to_value[label_name] = label_value
    lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

    label_names = [None] * (max(label_name_to_value.values()) + 1)
    for name, value in label_name_to_value.items():
        label_names[value] = name
    lbl_viz = utils.draw_label(lbl, img, label_names)

    PIL.Image.fromarray(img).save(osp.join(out_dir, 'img.png'))
    utils.lblsave(osp.join(out_dir, 'label.png'), lbl)
    PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, 'label_viz.png'))

    with open(osp.join(out_dir, 'label_names.txt'), 'w') as f:
        for lbl_name in label_names:
            f.write(lbl_name + '\n')

    warnings.warn('info.yaml is being replaced by label_names.txt')
    info = dict(label_names=label_names)
    with open(osp.join(out_dir, 'info.yaml'), 'w') as f:
        yaml.safe_dump(info, f, default_flow_style=False)

    print('Saved to: %s' % out_dir)
75 |
if __name__ == '__main__':
    # Script entry point: convert one JSON file when run directly.
    main()
78 |
--------------------------------------------------------------------------------
/examples/video_annotation/data_annotated/00000100.json:
--------------------------------------------------------------------------------
1 | {
2 | "flags": {},
3 | "shapes": [
4 | {
5 | "label": "track",
6 | "line_color": null,
7 | "fill_color": null,
8 | "points": [
9 | [
10 | 634,
11 | 204
12 | ],
13 | [
14 | 604,
15 | 275
16 | ],
17 | [
18 | 603,
19 | 340
20 | ],
21 | [
22 | 622,
23 | 363
24 | ],
25 | [
26 | 639,
27 | 363
28 | ],
29 | [
30 | 649,
31 | 354
32 | ],
33 | [
34 | 682,
35 | 383
36 | ],
37 | [
38 | 733,
39 | 390
40 | ],
41 | [
42 | 748,
43 | 364
44 | ],
45 | [
46 | 827,
47 | 359
48 | ],
49 | [
50 | 829,
51 | 250
52 | ],
53 | [
54 | 800,
55 | 194
56 | ],
57 | [
58 | 775,
59 | 185
60 | ],
61 | [
62 | 740,
63 | 199
64 | ]
65 | ]
66 | },
67 | {
68 | "label": "track",
69 | "line_color": null,
70 | "fill_color": null,
71 | "points": [
72 | [
73 | 860,
74 | 190
75 | ],
76 | [
77 | 997,
78 | 186
79 | ],
80 | [
81 | 998,
82 | 305
83 | ],
84 | [
85 | 924,
86 | 320
87 | ],
88 | [
89 | 905,
90 | 352
91 | ],
92 | [
93 | 877,
94 | 353
95 | ],
96 | [
97 | 869,
98 | 245
99 | ],
100 | [
101 | 879,
102 | 222
103 | ]
104 | ]
105 | },
106 | {
107 | "label": "car",
108 | "line_color": null,
109 | "fill_color": null,
110 | "points": [
111 | [
112 | 924,
113 | 321
114 | ],
115 | [
116 | 905,
117 | 352
118 | ],
119 | [
120 | 909,
121 | 388
122 | ],
123 | [
124 | 936,
125 | 404
126 | ],
127 | [
128 | 959,
129 | 411
130 | ],
131 | [
132 | 966,
133 | 431
134 | ],
135 | [
136 | 1000.0,
137 | 432.0
138 | ],
139 | [
140 | 1000.0,
141 | 306.0
142 | ]
143 | ]
144 | }
145 | ],
146 | "lineColor": [
147 | 0,
148 | 255,
149 | 0,
150 | 128
151 | ],
152 | "fillColor": [
153 | 255,
154 | 0,
155 | 0,
156 | 128
157 | ],
158 | "imagePath": "00000100.jpg",
159 | "imageData": null
160 | }
--------------------------------------------------------------------------------
/tests/widgets_tests/test_label_dialog.py:
--------------------------------------------------------------------------------
1 | from qtpy import QtCore
2 | from qtpy import QtWidgets
3 |
4 | from labelme.widgets import LabelDialog
5 | from labelme.widgets import LabelQLineEdit
6 |
7 |
def test_LabelQLineEdit(qtbot):
    """Arrow keys on a LabelQLineEdit navigate its attached list widget,
    while letter keys still type into the line edit itself."""
    list_widget = QtWidgets.QListWidget()
    list_widget.addItems([
        'cat',
        'dog',
        'person',
    ])
    widget = LabelQLineEdit()
    widget.setListWidget(list_widget)
    qtbot.addWidget(widget)

    # key press to navigate in label list
    item = widget.list_widget.findItems('cat', QtCore.Qt.MatchExactly)[0]
    widget.list_widget.setCurrentItem(item)
    assert widget.list_widget.currentItem().text() == 'cat'
    qtbot.keyPress(widget, QtCore.Qt.Key_Down)
    assert widget.list_widget.currentItem().text() == 'dog'

    # key press to enter label
    # Letter keys must not be forwarded to the list; they spell 'person'
    # in the line edit.
    qtbot.keyPress(widget, QtCore.Qt.Key_P)
    qtbot.keyPress(widget, QtCore.Qt.Key_E)
    qtbot.keyPress(widget, QtCore.Qt.Key_R)
    qtbot.keyPress(widget, QtCore.Qt.Key_S)
    qtbot.keyPress(widget, QtCore.Qt.Key_O)
    qtbot.keyPress(widget, QtCore.Qt.Key_N)
    assert widget.text() == 'person'
34 |
35 |
def test_LabelDialog_addLabelHistory(qtbot):
    """Adding an existing label a second time must not duplicate it."""
    dialog = LabelDialog(labels=['cat', 'dog', 'person'], sort_labels=True)
    qtbot.addWidget(dialog)

    dialog.addLabelHistory('bicycle')
    assert dialog.labelList.count() == 4
    # A repeated insert of the same label is a no-op.
    dialog.addLabelHistory('bicycle')
    assert dialog.labelList.count() == 4
    # With sort_labels=True, 'bicycle' sorts to the top of the list.
    assert dialog.labelList.item(0).text() == 'bicycle'
47 |
48 |
def test_LabelDialog_popUp(qtbot):
    """popUp() blocks on a modal dialog, so each scenario queues its key
    events via a single-shot timer that fires after the dialog opens."""
    labels = ['cat', 'dog', 'person']
    widget = LabelDialog(labels=labels, sort_labels=True)
    qtbot.addWidget(widget)

    # popUp(text='cat')

    def interact():
        qtbot.keyClick(widget.edit, QtCore.Qt.Key_P)  # enter 'p' for 'person'  # NOQA
        qtbot.keyClick(widget.edit, QtCore.Qt.Key_Enter)  # NOQA
        qtbot.keyClick(widget.edit, QtCore.Qt.Key_Enter)  # NOQA

    QtCore.QTimer.singleShot(500, interact)
    text = widget.popUp('cat')
    assert text == 'person'

    # popUp()
    # Without an argument, the dialog starts from the last entered label.

    def interact():
        qtbot.keyClick(widget.edit, QtCore.Qt.Key_Enter)  # NOQA
        qtbot.keyClick(widget.edit, QtCore.Qt.Key_Enter)  # NOQA

    QtCore.QTimer.singleShot(500, interact)
    text = widget.popUp()
    assert text == 'person'

    # popUp() + key_Up

    def interact():
        qtbot.keyClick(widget.edit, QtCore.Qt.Key_Up)  # 'person' -> 'dog'  # NOQA
        qtbot.keyClick(widget.edit, QtCore.Qt.Key_Enter)  # NOQA
        qtbot.keyClick(widget.edit, QtCore.Qt.Key_Enter)  # NOQA

    QtCore.QTimer.singleShot(500, interact)
    text = widget.popUp()
    assert text == 'dog'
85 |
--------------------------------------------------------------------------------
/examples/video_annotation/data_annotated/00000101.json:
--------------------------------------------------------------------------------
1 | {
2 | "flags": {},
3 | "shapes": [
4 | {
5 | "label": "track",
6 | "line_color": null,
7 | "fill_color": null,
8 | "points": [
9 | [
10 | 614.0,
11 | 204.0
12 | ],
13 | [
14 | 584.0,
15 | 275.0
16 | ],
17 | [
18 | 583.0,
19 | 340.0
20 | ],
21 | [
22 | 602.0,
23 | 363.0
24 | ],
25 | [
26 | 619.0,
27 | 363.0
28 | ],
29 | [
30 | 629.0,
31 | 354.0
32 | ],
33 | [
34 | 662.0,
35 | 383.0
36 | ],
37 | [
38 | 713.0,
39 | 390.0
40 | ],
41 | [
42 | 728.0,
43 | 364.0
44 | ],
45 | [
46 | 827.0,
47 | 358.0
48 | ],
49 | [
50 | 825.0,
51 | 249.0
52 | ],
53 | [
54 | 801.0,
55 | 200.0
56 | ],
57 | [
58 | 757.0,
59 | 194.0
60 | ],
61 | [
62 | 720.0,
63 | 199.0
64 | ]
65 | ]
66 | },
67 | {
68 | "label": "track",
69 | "line_color": null,
70 | "fill_color": null,
71 | "points": [
72 | [
73 | 860.0,
74 | 190.0
75 | ],
76 | [
77 | 997.0,
78 | 186.0
79 | ],
80 | [
81 | 998.0,
82 | 305.0
83 | ],
84 | [
85 | 924.0,
86 | 320.0
87 | ],
88 | [
89 | 905.0,
90 | 352.0
91 | ],
92 | [
93 | 877.0,
94 | 353.0
95 | ],
96 | [
97 | 869.0,
98 | 245.0
99 | ],
100 | [
101 | 879.0,
102 | 222.0
103 | ]
104 | ]
105 | },
106 | {
107 | "label": "car",
108 | "line_color": null,
109 | "fill_color": null,
110 | "points": [
111 | [
112 | 924.0,
113 | 321.0
114 | ],
115 | [
116 | 905.0,
117 | 352.0
118 | ],
119 | [
120 | 909.0,
121 | 388.0
122 | ],
123 | [
124 | 936.0,
125 | 404.0
126 | ],
127 | [
128 | 959.0,
129 | 411.0
130 | ],
131 | [
132 | 966.0,
133 | 431.0
134 | ],
135 | [
136 | 1000.0,
137 | 432.0
138 | ],
139 | [
140 | 1000.0,
141 | 306.0
142 | ]
143 | ]
144 | }
145 | ],
146 | "lineColor": [
147 | 0,
148 | 255,
149 | 0,
150 | 128
151 | ],
152 | "fillColor": [
153 | 255,
154 | 0,
155 | 0,
156 | 128
157 | ],
158 | "imagePath": "00000101.jpg",
159 | "imageData": null
160 | }
--------------------------------------------------------------------------------
/examples/video_annotation/data_annotated/00000102.json:
--------------------------------------------------------------------------------
1 | {
2 | "flags": {},
3 | "shapes": [
4 | {
5 | "label": "track",
6 | "line_color": null,
7 | "fill_color": null,
8 | "points": [
9 | [
10 | 593.0,
11 | 204.0
12 | ],
13 | [
14 | 563.0,
15 | 275.0
16 | ],
17 | [
18 | 562.0,
19 | 340.0
20 | ],
21 | [
22 | 581.0,
23 | 363.0
24 | ],
25 | [
26 | 598.0,
27 | 363.0
28 | ],
29 | [
30 | 608.0,
31 | 354.0
32 | ],
33 | [
34 | 641.0,
35 | 383.0
36 | ],
37 | [
38 | 692.0,
39 | 390.0
40 | ],
41 | [
42 | 707.0,
43 | 364.0
44 | ],
45 | [
46 | 827.0,
47 | 358.0
48 | ],
49 | [
50 | 823.0,
51 | 243.0
52 | ],
53 | [
54 | 802.0,
55 | 199.0
56 | ],
57 | [
58 | 736.0,
59 | 194.0
60 | ],
61 | [
62 | 699.0,
63 | 199.0
64 | ]
65 | ]
66 | },
67 | {
68 | "label": "track",
69 | "line_color": null,
70 | "fill_color": null,
71 | "points": [
72 | [
73 | 860.0,
74 | 190.0
75 | ],
76 | [
77 | 997.0,
78 | 186.0
79 | ],
80 | [
81 | 998.0,
82 | 305.0
83 | ],
84 | [
85 | 924.0,
86 | 320.0
87 | ],
88 | [
89 | 905.0,
90 | 352.0
91 | ],
92 | [
93 | 877.0,
94 | 353.0
95 | ],
96 | [
97 | 869.0,
98 | 245.0
99 | ],
100 | [
101 | 879.0,
102 | 222.0
103 | ]
104 | ]
105 | },
106 | {
107 | "label": "car",
108 | "line_color": null,
109 | "fill_color": null,
110 | "points": [
111 | [
112 | 924.0,
113 | 321.0
114 | ],
115 | [
116 | 905.0,
117 | 352.0
118 | ],
119 | [
120 | 909.0,
121 | 388.0
122 | ],
123 | [
124 | 936.0,
125 | 404.0
126 | ],
127 | [
128 | 959.0,
129 | 411.0
130 | ],
131 | [
132 | 966.0,
133 | 431.0
134 | ],
135 | [
136 | 1000.0,
137 | 432.0
138 | ],
139 | [
140 | 1000.0,
141 | 306.0
142 | ]
143 | ]
144 | }
145 | ],
146 | "lineColor": [
147 | 0,
148 | 255,
149 | 0,
150 | 128
151 | ],
152 | "fillColor": [
153 | 255,
154 | 0,
155 | 0,
156 | 128
157 | ],
158 | "imagePath": "00000102.jpg",
159 | "imageData": null
160 | }
--------------------------------------------------------------------------------
/examples/video_annotation/data_annotated/00000103.json:
--------------------------------------------------------------------------------
1 | {
2 | "flags": {},
3 | "shapes": [
4 | {
5 | "label": "track",
6 | "line_color": null,
7 | "fill_color": null,
8 | "points": [
9 | [
10 | 573.0,
11 | 207.0
12 | ],
13 | [
14 | 543.0,
15 | 278.0
16 | ],
17 | [
18 | 542.0,
19 | 343.0
20 | ],
21 | [
22 | 561.0,
23 | 366.0
24 | ],
25 | [
26 | 578.0,
27 | 366.0
28 | ],
29 | [
30 | 588.0,
31 | 357.0
32 | ],
33 | [
34 | 621.0,
35 | 386.0
36 | ],
37 | [
38 | 672.0,
39 | 393.0
40 | ],
41 | [
42 | 687.0,
43 | 367.0
44 | ],
45 | [
46 | 829.0,
47 | 354.0
48 | ],
49 | [
50 | 821.0,
51 | 236.0
52 | ],
53 | [
54 | 801.0,
55 | 199.0
56 | ],
57 | [
58 | 716.0,
59 | 197.0
60 | ],
61 | [
62 | 679.0,
63 | 202.0
64 | ]
65 | ]
66 | },
67 | {
68 | "label": "track",
69 | "line_color": null,
70 | "fill_color": null,
71 | "points": [
72 | [
73 | 860.0,
74 | 190.0
75 | ],
76 | [
77 | 997.0,
78 | 186.0
79 | ],
80 | [
81 | 998.0,
82 | 305.0
83 | ],
84 | [
85 | 924.0,
86 | 320.0
87 | ],
88 | [
89 | 905.0,
90 | 352.0
91 | ],
92 | [
93 | 877.0,
94 | 353.0
95 | ],
96 | [
97 | 869.0,
98 | 245.0
99 | ],
100 | [
101 | 879.0,
102 | 222.0
103 | ]
104 | ]
105 | },
106 | {
107 | "label": "car",
108 | "line_color": null,
109 | "fill_color": null,
110 | "points": [
111 | [
112 | 924.0,
113 | 321.0
114 | ],
115 | [
116 | 905.0,
117 | 352.0
118 | ],
119 | [
120 | 909.0,
121 | 388.0
122 | ],
123 | [
124 | 936.0,
125 | 404.0
126 | ],
127 | [
128 | 959.0,
129 | 411.0
130 | ],
131 | [
132 | 966.0,
133 | 431.0
134 | ],
135 | [
136 | 1000.0,
137 | 432.0
138 | ],
139 | [
140 | 1000.0,
141 | 306.0
142 | ]
143 | ]
144 | }
145 | ],
146 | "lineColor": [
147 | 0,
148 | 255,
149 | 0,
150 | 128
151 | ],
152 | "fillColor": [
153 | 255,
154 | 0,
155 | 0,
156 | 128
157 | ],
158 | "imagePath": "00000103.jpg",
159 | "imageData": null
160 | }
--------------------------------------------------------------------------------
/examples/video_annotation/data_annotated/00000104.json:
--------------------------------------------------------------------------------
1 | {
2 | "flags": {},
3 | "shapes": [
4 | {
5 | "label": "track",
6 | "line_color": null,
7 | "fill_color": null,
8 | "points": [
9 | [
10 | 556.0,
11 | 201.0
12 | ],
13 | [
14 | 528.0,
15 | 277.0
16 | ],
17 | [
18 | 524.0,
19 | 342.0
20 | ],
21 | [
22 | 528.0,
23 | 361.0
24 | ],
25 | [
26 | 563.0,
27 | 365.0
28 | ],
29 | [
30 | 573.0,
31 | 356.0
32 | ],
33 | [
34 | 606.0,
35 | 385.0
36 | ],
37 | [
38 | 657.0,
39 | 392.0
40 | ],
41 | [
42 | 672.0,
43 | 366.0
44 | ],
45 | [
46 | 825.0,
47 | 354.0
48 | ],
49 | [
50 | 826.0,
51 | 238.0
52 | ],
53 | [
54 | 801.0,
55 | 202.0
56 | ],
57 | [
58 | 701.0,
59 | 196.0
60 | ],
61 | [
62 | 664.0,
63 | 201.0
64 | ]
65 | ]
66 | },
67 | {
68 | "label": "track",
69 | "line_color": null,
70 | "fill_color": null,
71 | "points": [
72 | [
73 | 860.0,
74 | 190.0
75 | ],
76 | [
77 | 997.0,
78 | 186.0
79 | ],
80 | [
81 | 998.0,
82 | 305.0
83 | ],
84 | [
85 | 924.0,
86 | 320.0
87 | ],
88 | [
89 | 905.0,
90 | 352.0
91 | ],
92 | [
93 | 874.0,
94 | 354.0
95 | ],
96 | [
97 | 869.0,
98 | 245.0
99 | ],
100 | [
101 | 879.0,
102 | 222.0
103 | ]
104 | ]
105 | },
106 | {
107 | "label": "car",
108 | "line_color": null,
109 | "fill_color": null,
110 | "points": [
111 | [
112 | 924.0,
113 | 321.0
114 | ],
115 | [
116 | 905.0,
117 | 352.0
118 | ],
119 | [
120 | 909.0,
121 | 388.0
122 | ],
123 | [
124 | 936.0,
125 | 404.0
126 | ],
127 | [
128 | 959.0,
129 | 411.0
130 | ],
131 | [
132 | 966.0,
133 | 431.0
134 | ],
135 | [
136 | 1000.0,
137 | 432.0
138 | ],
139 | [
140 | 1000.0,
141 | 306.0
142 | ]
143 | ]
144 | }
145 | ],
146 | "lineColor": [
147 | 0,
148 | 255,
149 | 0,
150 | 128
151 | ],
152 | "fillColor": [
153 | 255,
154 | 0,
155 | 0,
156 | 128
157 | ],
158 | "imagePath": "00000104.jpg",
159 | "imageData": null
160 | }
--------------------------------------------------------------------------------
/labelme/cli/on_docker.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import argparse
4 | import distutils.spawn
5 | import json
6 | import os
7 | import os.path as osp
8 | import platform
9 | import shlex
10 | import subprocess
11 | import sys
12 |
13 | from labelme import logger
14 |
15 |
def get_ip():
    """Return the host IP to grant X11 access to.

    On Linux an empty string is enough for the xhost call; on macOS
    (Darwin) the address of interface en0 is parsed from ifconfig output.

    Raises:
        RuntimeError: if no address is found on Darwin, or the platform
            is neither Linux nor Darwin.
    """
    dist = platform.platform().split('-')[0]
    if dist == 'Linux':
        return ''
    elif dist == 'Darwin':
        cmd = 'ifconfig en0'
        output = subprocess.check_output(shlex.split(cmd))
        if str != bytes:  # Python3
            output = output.decode('utf-8')
        for row in output.splitlines():
            # split() without a separator collapses any run of whitespace,
            # unlike split(' ') which yields empty strings on multi-space
            # columns; also guard against blank lines (cols == []).
            cols = row.strip().split()
            if cols and cols[0] == 'inet':
                ip = cols[1]
                return ip
        else:
            raise RuntimeError('No ip is found.')
    else:
        raise RuntimeError('Unsupported platform.')
34 |
35 |
def labelme_on_docker(in_file, out_file):
    """Run labelme inside the wkentaro/labelme docker image.

    Args:
        in_file: image file (or directory) to annotate; bind-mounted into
            the container.
        out_file: optional output JSON path; pre-created empty so docker
            mounts a file (not a directory), then validated afterwards.

    Returns:
        out_file if a valid annotation was written, else None.

    Raises:
        RuntimeError: if out_file already exists, or annotation was
            cancelled (file left empty / not valid JSON).
    """
    ip = get_ip()
    # Allow the container's X client to connect to the host display.
    cmd = 'xhost + %s' % ip
    subprocess.check_output(shlex.split(cmd))

    if out_file:
        out_file = osp.abspath(out_file)
        if osp.exists(out_file):
            raise RuntimeError('File exists: %s' % out_file)
        else:
            # Touch the file and close the handle (the original leaked it).
            open(osp.abspath(out_file), 'w').close()

    cmd = 'docker run -it --rm' \
        ' -e DISPLAY={0}:0' \
        ' -e QT_X11_NO_MITSHM=1' \
        ' -v /tmp/.X11-unix:/tmp/.X11-unix' \
        ' -v {1}:{2}' \
        ' -w /home/developer'
    in_file_a = osp.abspath(in_file)
    in_file_b = osp.join('/home/developer', osp.basename(in_file))
    cmd = cmd.format(
        ip,
        in_file_a,
        in_file_b,
    )
    if out_file:
        out_file_a = osp.abspath(out_file)
        out_file_b = osp.join('/home/developer', osp.basename(out_file))
        cmd += ' -v {0}:{1}'.format(out_file_a, out_file_b)
    cmd += ' wkentaro/labelme labelme {0}'.format(in_file_b)
    if out_file:
        cmd += ' -O {0}'.format(out_file_b)
    subprocess.call(shlex.split(cmd))

    if out_file:
        try:
            # Validate that labelme actually wrote JSON output.
            with open(out_file) as f:
                json.load(f)
            return out_file
        except Exception:
            # Remove the still-empty placeholder before reporting cancel.
            with open(out_file) as f:
                if f.read() == '':
                    os.remove(out_file)
            raise RuntimeError('Annotation is cancelled.')
78 |
79 |
def main():
    """CLI entry point: check docker availability, then run labelme in it."""
    parser = argparse.ArgumentParser()
    parser.add_argument('in_file', help='Input file or directory.')
    parser.add_argument('-O', '--output')
    args = parser.parse_args()

    if distutils.spawn.find_executable('docker') is None:
        logger.error('Please install docker.')
        sys.exit(1)

    try:
        saved_file = labelme_on_docker(args.in_file, args.output)
        if saved_file:
            print('Saved to: %s' % saved_file)
    except RuntimeError as e:
        sys.stderr.write(str(e) + '\n')
        sys.exit(1)
97 |
98 |
if __name__ == '__main__':
    # Script entry point: launch labelme inside docker when run directly.
    main()
101 |
--------------------------------------------------------------------------------
/examples/semantic_segmentation/labelme2voc.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from __future__ import print_function
4 |
5 | import argparse
6 | import glob
7 | import json
8 | import os
9 | import os.path as osp
10 |
11 | import numpy as np
12 | import PIL.Image
13 |
14 | import labelme
15 |
16 |
def main():
    """Convert a directory of labelme JSON files into a VOC-style
    semantic segmentation dataset.

    Under out_dir this creates:
        JPEGImages/                      copied input images
        SegmentationClass/               label arrays (.npy)
        SegmentationClassPNG/            indexed label images (.png)
        SegmentationClassVisualization/  label overlays (.jpg)
        class_names.txt                  one class name per line
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('labels_file')
    parser.add_argument('in_dir', help='input dir with annotated files')
    parser.add_argument('out_dir', help='output dataset directory')
    args = parser.parse_args()

    if osp.exists(args.out_dir):
        print('Output directory already exists:', args.out_dir)
        # Same effect as the previous quit(1), but does not depend on the
        # site module providing quit().
        raise SystemExit(1)
    os.makedirs(args.out_dir)
    os.makedirs(osp.join(args.out_dir, 'JPEGImages'))
    os.makedirs(osp.join(args.out_dir, 'SegmentationClass'))
    os.makedirs(osp.join(args.out_dir, 'SegmentationClassPNG'))
    os.makedirs(osp.join(args.out_dir, 'SegmentationClassVisualization'))
    print('Creating dataset:', args.out_dir)

    # labels_file: one class per line; line 0 must be '__ignore__' (id -1)
    # and line 1 '_background_' (id 0).
    class_names = []
    class_name_to_id = {}
    with open(args.labels_file) as f:
        for i, line in enumerate(f):
            class_id = i - 1  # starts with -1
            class_name = line.strip()
            class_name_to_id[class_name] = class_id
            if class_id == -1:
                assert class_name == '__ignore__'
                continue
            elif class_id == 0:
                assert class_name == '_background_'
            class_names.append(class_name)
    class_names = tuple(class_names)
    print('class_names:', class_names)
    out_class_names_file = osp.join(args.out_dir, 'class_names.txt')
    with open(out_class_names_file, 'w') as f:
        f.writelines('\n'.join(class_names))
    print('Saved class_names:', out_class_names_file)

    colormap = labelme.utils.label_colormap(255)

    for label_file in glob.glob(osp.join(args.in_dir, '*.json')):
        print('Generating dataset from:', label_file)
        base = osp.splitext(osp.basename(label_file))[0]
        out_img_file = osp.join(
            args.out_dir, 'JPEGImages', base + '.jpg')
        out_lbl_file = osp.join(
            args.out_dir, 'SegmentationClass', base + '.npy')
        out_png_file = osp.join(
            args.out_dir, 'SegmentationClassPNG', base + '.png')
        out_viz_file = osp.join(
            args.out_dir, 'SegmentationClassVisualization', base + '.jpg')

        # Keep the file open only for the json.load itself.
        with open(label_file) as f:
            data = json.load(f)

        # imagePath is stored relative to the JSON file.
        img_file = osp.join(osp.dirname(label_file), data['imagePath'])
        img = np.asarray(PIL.Image.open(img_file))
        PIL.Image.fromarray(img).save(out_img_file)

        lbl = labelme.utils.shapes_to_label(
            img_shape=img.shape,
            shapes=data['shapes'],
            label_name_to_value=class_name_to_id,
        )
        labelme.utils.lblsave(out_png_file, lbl)

        np.save(out_lbl_file, lbl)

        viz = labelme.utils.draw_label(
            lbl, img, class_names, colormap=colormap)
        PIL.Image.fromarray(viz).save(out_viz_file)
87 |
88 |
if __name__ == '__main__':
    # Script entry point: build the VOC dataset when run directly.
    main()
91 |
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os.path as osp
3 |
4 | import numpy as np
5 | import PIL.Image
6 |
7 | import labelme
8 |
9 |
10 | here = osp.dirname(osp.abspath(__file__))
11 | data_dir = osp.join(here, 'data')
12 |
13 |
def _get_img_and_data():
    """Load the sample annotation JSON and decode its embedded image.

    Returns:
        (img, data): decoded image array and the raw JSON dict.
    """
    json_file = osp.join(data_dir, 'apc2016_obj3.json')
    # Use a context manager so the file handle is not leaked.
    with open(json_file) as f:
        data = json.load(f)
    img_b64 = data['imageData']
    img = labelme.utils.img_b64_to_arr(img_b64)
    return img, data
20 |
21 |
def _get_img_and_lbl():
    """Return (img, lbl, label_names) for the sample annotation."""
    img, data = _get_img_and_data()

    # Use '_background_' (single underscores) for label 0, matching the
    # convention used throughout labelme (e.g. labelme.utils.shape).
    label_name_to_value = {'_background_': 0}
    for shape in data['shapes']:
        label_name = shape['label']
        # Only assign a new id the first time a label is seen; otherwise
        # a repeated label would leave an unused id (a None hole in
        # label_names below).
        if label_name not in label_name_to_value:
            label_name_to_value[label_name] = len(label_name_to_value)

    n_labels = max(label_name_to_value.values()) + 1
    label_names = [None] * n_labels
    for label_name, label_value in label_name_to_value.items():
        label_names[label_value] = label_name

    lbl = labelme.utils.shapes_to_label(
        img.shape, data['shapes'], label_name_to_value)
    return img, lbl, label_names
39 |
40 |
41 | # -----------------------------------------------------------------------------
42 |
43 |
def test_img_b64_to_arr():
    """The sample image decodes to a uint8 array of the known size."""
    decoded, _ = _get_img_and_data()
    assert decoded.dtype == np.uint8
    assert decoded.shape == (907, 1210, 3)
48 |
49 |
def test_img_arr_to_b64():
    """Encoding an array to base64 and decoding it round-trips losslessly."""
    img_file = osp.join(data_dir, 'apc2016_obj3.jpg')
    original = np.asarray(PIL.Image.open(img_file))
    restored = labelme.utils.img_b64_to_arr(
        labelme.utils.img_arr_to_b64(original))
    np.testing.assert_allclose(original, restored)
56 |
57 |
def test_shapes_to_label():
    """shapes_to_label returns an (H, W) class map for the sample data."""
    img, data = _get_img_and_data()
    # Assign ids in encounter order, exactly as the GUI would.
    label_name_to_value = {}
    for shape in data['shapes']:
        label_name_to_value[shape['label']] = len(label_name_to_value)
    cls_map = labelme.utils.shapes_to_label(
        img.shape, data['shapes'], label_name_to_value)
    assert cls_map.shape == img.shape[:2]
68 |
69 |
def test_shape_to_mask():
    """Every shape rasterizes to a mask of the image's spatial size."""
    img, data = _get_img_and_data()
    spatial = img.shape[:2]
    for shape in data['shapes']:
        mask = labelme.utils.shape_to_mask(spatial, shape['points'])
        assert mask.shape == spatial
76 |
77 |
def test_label_colormap():
    """label_colormap returns one RGB row per requested label."""
    n_colors = 255
    cmap = labelme.utils.label_colormap(N=n_colors)
    assert cmap.shape == (n_colors, 3)
82 |
83 |
def test_label2rgb():
    """label2rgb yields a uint8 visualization, with or without a base image."""
    img, lbl, label_names = _get_img_and_lbl()
    n_labels = len(label_names)

    # Without a base image.
    viz = labelme.utils.label2rgb(lbl=lbl, n_labels=n_labels)
    assert viz.shape[:2] == lbl.shape
    assert viz.dtype == np.uint8

    # Overlaid on the original image.
    viz = labelme.utils.label2rgb(lbl=lbl, img=img, n_labels=n_labels)
    assert viz.shape[:2] == lbl.shape
    assert img.shape[:2] == lbl.shape
    assert viz.dtype == np.uint8
95 |
96 |
def test_draw_label():
    """draw_label keeps the spatial size and produces uint8 output."""
    img, lbl, label_names = _get_img_and_lbl()
    overlay = labelme.utils.draw_label(lbl, img, label_names=label_names)
    assert overlay.shape[:2] == img.shape[:2] == lbl.shape[:2]
    assert overlay.dtype == np.uint8
103 |
104 |
def test_draw_instances():
    """draw_instances renders bounding boxes and captions for each
    non-background label present in the sample label map."""
    img, lbl, label_names = _get_img_and_lbl()
    # One boolean mask per non-background label value in lbl.
    labels_and_masks = {l: lbl == l for l in np.unique(lbl) if l != 0}
    labels, masks = zip(*labels_and_masks.items())
    masks = np.asarray(masks)
    bboxes = labelme.utils.masks_to_bboxes(masks)
    captions = [label_names[l] for l in labels]
    viz = labelme.utils.draw_instances(img, bboxes, labels, captions=captions)
    assert viz.shape[:2] == img.shape[:2]
    assert viz.dtype == np.uint8
115 |
--------------------------------------------------------------------------------
/labelme/utils/shape.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import numpy as np
4 | import PIL.Image
5 | import PIL.ImageDraw
6 |
7 | from labelme import logger
8 |
9 |
def polygons_to_mask(img_shape, polygons, shape_type=None):
    """Deprecated alias of shape_to_mask; logs a warning and delegates."""
    logger.warning("The 'polygons_to_mask' function is deprecated, "
                   "use 'shape_to_mask' instead.")
    return shape_to_mask(img_shape, points=polygons, shape_type=shape_type)
16 |
17 |
def shape_to_mask(img_shape, points, shape_type=None,
                  line_width=10, point_size=5):
    """Rasterize one shape into a boolean mask.

    Args:
        img_shape: image shape; only the first two dims (H, W) are used.
        points: list of (x, y) vertices; meaning depends on shape_type.
        shape_type: 'circle', 'rectangle', 'line', 'linestrip', 'point',
            or anything else (including None) for a polygon.
        line_width: stroke width for 'line' / 'linestrip'.
        point_size: radius of the disk drawn for 'point'.

    Returns:
        bool ndarray of shape img_shape[:2], True inside the shape.
    """
    mask = np.zeros(img_shape[:2], dtype=np.uint8)
    mask = PIL.Image.fromarray(mask)
    draw = PIL.ImageDraw.Draw(mask)
    xy = [tuple(point) for point in points]
    if shape_type == 'circle':
        assert len(xy) == 2, 'Shape of shape_type=circle must have 2 points'
        # First point is the center; the second lies on the circumference.
        (cx, cy), (px, py) = xy
        d = math.sqrt((cx - px) ** 2 + (cy - py) ** 2)
        draw.ellipse([cx - d, cy - d, cx + d, cy + d], outline=1, fill=1)
    elif shape_type == 'rectangle':
        assert len(xy) == 2, 'Shape of shape_type=rectangle must have 2 points'
        draw.rectangle(xy, outline=1, fill=1)
    elif shape_type == 'line':
        assert len(xy) == 2, 'Shape of shape_type=line must have 2 points'
        draw.line(xy=xy, fill=1, width=line_width)
    elif shape_type == 'linestrip':
        # Any number of points; drawn as a polyline.
        draw.line(xy=xy, fill=1, width=line_width)
    elif shape_type == 'point':
        assert len(xy) == 1, 'Shape of shape_type=point must have 1 points'
        cx, cy = xy[0]
        r = point_size
        draw.ellipse([cx - r, cy - r, cx + r, cy + r], outline=1, fill=1)
    else:
        # Default: treat the points as a filled polygon.
        assert len(xy) > 2, 'Polygon must have points more than 2'
        draw.polygon(xy=xy, outline=1, fill=1)
    mask = np.array(mask, dtype=bool)
    return mask
47 |
48 |
def shapes_to_label(img_shape, shapes, label_name_to_value, type='class'):
    """Rasterize shapes into a label image.

    Args:
        img_shape: image shape; only (H, W) is used.
        shapes: list of labelme shape dicts with 'points', 'label' and
            optionally 'shape_type'.
        label_name_to_value: mapping from class name to integer id.
        type: 'class' returns only the class map; 'instance' also returns
            an instance-id map. For 'instance', the class name is the part
            of the label before the first '-', while the full label
            identifies the instance.

    Returns:
        cls (int32, HxW); for type='instance' the tuple (cls, ins).
    """
    assert type in ['class', 'instance']

    cls = np.zeros(img_shape[:2], dtype=np.int32)
    if type == 'instance':
        ins = np.zeros(img_shape[:2], dtype=np.int32)
        instance_names = ['_background_']
    for shape in shapes:
        points = shape['points']
        label = shape['label']
        shape_type = shape.get('shape_type', None)
        if type == 'class':
            cls_name = label
        elif type == 'instance':
            cls_name = label.split('-')[0]
            if label not in instance_names:
                instance_names.append(label)
            # Look up this label's own instance id. The previous
            # `len(instance_names) - 1` wrongly assigned the id of the
            # most recently added instance when a label occurred in more
            # than one shape.
            ins_id = instance_names.index(label)
        cls_id = label_name_to_value[cls_name]
        mask = shape_to_mask(img_shape[:2], points, shape_type)
        cls[mask] = cls_id
        if type == 'instance':
            ins[mask] = ins_id

    if type == 'instance':
        return cls, ins
    return cls
76 |
77 |
def labelme_shapes_to_label(img_shape, shapes):
    """Deprecated: build a label map via shapes_to_label.

    Returns:
        (lbl, label_name_to_value): label image and the name->id mapping,
        with ids assigned in encounter order and '_background_' fixed at 0.
    """
    # logger.warn is a deprecated alias of warning(); use warning() as the
    # rest of this module does.
    logger.warning('labelme_shapes_to_label is deprecated, so please use '
                   'shapes_to_label.')

    label_name_to_value = {'_background_': 0}
    for shape in shapes:
        label_name = shape['label']
        if label_name in label_name_to_value:
            label_value = label_name_to_value[label_name]
        else:
            label_value = len(label_name_to_value)
            label_name_to_value[label_name] = label_value

    lbl = shapes_to_label(img_shape, shapes, label_name_to_value)
    return lbl, label_name_to_value
93 |
94 |
def masks_to_bboxes(masks):
    """Compute a (y1, x1, y2, x2) bounding box for each boolean mask.

    Args:
        masks: bool ndarray of shape (N, H, W).

    Returns:
        float32 ndarray of shape (N, 4); y2 and x2 are exclusive.

    Raises:
        ValueError: if masks is not 3-dimensional or not of bool dtype.
    """
    if masks.ndim != 3:
        raise ValueError(
            'masks.ndim must be 3, but it is {}'
            .format(masks.ndim)
        )
    if masks.dtype != bool:
        raise ValueError(
            'masks.dtype must be bool type, but it is {}'
            .format(masks.dtype)
        )
    boxes = []
    for mask in masks:
        coords = np.argwhere(mask)
        y1, x1 = coords.min(axis=0)
        y2, x2 = coords.max(axis=0) + 1
        boxes.append((y1, x1, y2, x2))
    return np.asarray(boxes, dtype=np.float32)
113 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import distutils.spawn
4 | import os.path
5 | from setuptools import find_packages
6 | from setuptools import setup
7 | import shlex
8 | import subprocess
9 | import sys
10 |
11 | import github2pypi
12 |
13 |
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
assert PY3 or PY2


# Read __version__ from labelme/_version.py without importing the labelme
# package itself.
here = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(here, 'labelme', '_version.py')
if PY3:
    import importlib
    version = importlib.machinery.SourceFileLoader(
        '_version', version_file
    ).load_module().__version__
else:
    assert PY2
    # imp is the Python2 predecessor of importlib.
    import imp
    version = imp.load_source('_version', version_file).__version__
del here
31 |
32 |
install_requires = [
    'lxml',
    'matplotlib',
    'numpy',
    'Pillow>=2.8.0',
    'PyYAML',
    'qtpy',
]

# Find python binding for qt with priority:
# PyQt5 -> PySide2 -> PyQt4,
# and PyQt5 is automatically installed on Python3.
QT_BINDING = None

try:
    import PyQt5  # NOQA
    QT_BINDING = 'pyqt5'
except ImportError:
    pass

if QT_BINDING is None:
    try:
        import PySide2  # NOQA
        QT_BINDING = 'pyside2'
    except ImportError:
        pass

if QT_BINDING is None:
    try:
        import PyQt4  # NOQA
        QT_BINDING = 'pyqt4'
    except ImportError:
        # No binding found: Python2 users must install one manually;
        # on Python3, PyQt5 is pip-installable so add it as a dependency.
        if PY2:
            print(
                'Please install PyQt5, PySide2 or PyQt4 for Python2.\n'
                'Note that PyQt5 can be installed via pip for Python3.',
                file=sys.stderr,
            )
            sys.exit(1)
        assert PY3
        # PyQt5 can be installed via pip for Python3
        install_requires.append('PyQt5')
        QT_BINDING = 'pyqt5'
# The binding name is only needed during the probe above; its effect is
# the possible addition of 'PyQt5' to install_requires.
del QT_BINDING
77 |
78 |
# `python setup.py release` tags the current version and uploads to PyPI.
# Check the argument count first: a bare `python setup.py` would otherwise
# crash with IndexError before setuptools could print its usage message.
if len(sys.argv) > 1 and sys.argv[1] == 'release':
    if not distutils.spawn.find_executable('twine'):
        print(
            'Please install twine:\n\n\tpip install twine\n',
            file=sys.stderr,
        )
        sys.exit(1)

    commands = [
        'git tag v{:s}'.format(version),
        'git push origin master --tag',
        'python setup.py sdist',
        'twine upload dist/labelme-{:s}.tar.gz'.format(version),
    ]
    for cmd in commands:
        subprocess.check_call(shlex.split(cmd))
    sys.exit(0)
96 |
97 |
# github2pypi is a git submodule; a namespace stub without __file__ means
# the submodule was never checked out.
if not hasattr(github2pypi, '__file__'):
    print('Please update submodule:\n\n\tgit submodule update --init')
    sys.exit(1)


# Rewrite relative README links to absolute GitHub URLs for PyPI rendering.
with open('README.md') as f:
    long_description = github2pypi.replace_url(
        slug='wkentaro/labelme', content=f.read()
    )
107 |
108 |
# Package metadata and console entry points for the labelme CLI tools.
setup(
    name='labelme',
    version=version,
    packages=find_packages(),
    description='Image Polygonal Annotation with Python',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Kentaro Wada',
    author_email='www.kentaro.wada@gmail.com',
    url='https://github.com/wkentaro/labelme',
    install_requires=install_requires,
    license='GPLv3',
    keywords='Image Annotation, Machine Learning',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    # Ship icons and default config alongside the code.
    package_data={'labelme': ['icons/*', 'config/*.yaml']},
    entry_points={
        'console_scripts': [
            'labelme=labelme.main:main',
            'labelme_draw_json=labelme.cli.draw_json:main',
            'labelme_draw_label_png=labelme.cli.draw_label_png:main',
            'labelme_json_to_dataset=labelme.cli.json_to_dataset:main',
            'labelme_on_docker=labelme.cli.on_docker:main',
        ],
    },
)
143 |
--------------------------------------------------------------------------------
/examples/instance_segmentation/labelme2voc.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from __future__ import print_function
4 |
5 | import argparse
6 | import glob
7 | import json
8 | import os
9 | import os.path as osp
10 |
11 | import numpy as np
12 | import PIL.Image
13 |
14 | import labelme
15 |
16 |
def main():
    """Convert labelme JSON annotations into a VOC-style instance
    segmentation dataset.

    Writes, per input ``*.json``: the source image (JPEGImages/), class
    and instance label arrays (.npy), indexed PNGs, and visualization
    JPEGs into the output directory.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('labels_file')
    parser.add_argument('in_dir')
    parser.add_argument('out_dir')
    args = parser.parse_args()

    if osp.exists(args.out_dir):
        print('Output directory already exists:', args.out_dir)
        # quit() is only injected by the site module and may be absent
        # (e.g. under `python -S`); SystemExit is always available.
        raise SystemExit(1)
    os.makedirs(args.out_dir)
    os.makedirs(osp.join(args.out_dir, 'JPEGImages'))
    os.makedirs(osp.join(args.out_dir, 'SegmentationClass'))
    os.makedirs(osp.join(args.out_dir, 'SegmentationClassPNG'))
    os.makedirs(osp.join(args.out_dir, 'SegmentationClassVisualization'))
    os.makedirs(osp.join(args.out_dir, 'SegmentationObject'))
    os.makedirs(osp.join(args.out_dir, 'SegmentationObjectPNG'))
    os.makedirs(osp.join(args.out_dir, 'SegmentationObjectVisualization'))
    print('Creating dataset:', args.out_dir)

    # Build class list; id -1 must be '__ignore__' and id 0 '_background_'.
    class_names = []
    class_name_to_id = {}
    with open(args.labels_file) as f:  # fix: file handle was leaked via bare open()
        for i, line in enumerate(f):
            class_id = i - 1  # starts with -1
            class_name = line.strip()
            class_name_to_id[class_name] = class_id
            if class_id == -1:
                assert class_name == '__ignore__'
                continue
            elif class_id == 0:
                assert class_name == '_background_'
            class_names.append(class_name)
    class_names = tuple(class_names)
    print('class_names:', class_names)
    out_class_names_file = osp.join(args.out_dir, 'class_names.txt')
    with open(out_class_names_file, 'w') as f:
        f.writelines('\n'.join(class_names))
    print('Saved class_names:', out_class_names_file)

    colormap = labelme.utils.label_colormap(255)

    for label_file in glob.glob(osp.join(args.in_dir, '*.json')):
        print('Generating dataset from:', label_file)
        # Load the annotation first; the file is only needed for json.load,
        # so close it before doing any processing.
        with open(label_file) as f:
            data = json.load(f)

        base = osp.splitext(osp.basename(label_file))[0]
        out_img_file = osp.join(
            args.out_dir, 'JPEGImages', base + '.jpg')
        out_cls_file = osp.join(
            args.out_dir, 'SegmentationClass', base + '.npy')
        out_clsp_file = osp.join(
            args.out_dir, 'SegmentationClassPNG', base + '.png')
        out_clsv_file = osp.join(
            args.out_dir, 'SegmentationClassVisualization', base + '.jpg')
        out_ins_file = osp.join(
            args.out_dir, 'SegmentationObject', base + '.npy')
        out_insp_file = osp.join(
            args.out_dir, 'SegmentationObjectPNG', base + '.png')
        out_insv_file = osp.join(
            args.out_dir, 'SegmentationObjectVisualization', base + '.jpg')

        # Copy the referenced image into the dataset.
        img_file = osp.join(osp.dirname(label_file), data['imagePath'])
        img = np.asarray(PIL.Image.open(img_file))
        PIL.Image.fromarray(img).save(out_img_file)

        cls, ins = labelme.utils.shapes_to_label(
            img_shape=img.shape,
            shapes=data['shapes'],
            label_name_to_value=class_name_to_id,
            type='instance',
        )
        ins[cls == -1] = 0  # ignore it.

        # class label
        labelme.utils.lblsave(out_clsp_file, cls)
        np.save(out_cls_file, cls)
        clsv = labelme.utils.draw_label(
            cls, img, class_names, colormap=colormap)
        PIL.Image.fromarray(clsv).save(out_clsv_file)

        # instance label
        labelme.utils.lblsave(out_insp_file, ins)
        np.save(out_ins_file, ins)
        instance_ids = np.unique(ins)
        instance_names = [str(i) for i in range(max(instance_ids) + 1)]
        insv = labelme.utils.draw_label(ins, img, instance_names)
        PIL.Image.fromarray(insv).save(out_insv_file)
106 |
107 |
# Allow running this converter directly as a script.
if __name__ == '__main__':
    main()
110 |
--------------------------------------------------------------------------------
/examples/bbox_detection/labelme2voc.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from __future__ import print_function
4 |
5 | import argparse
6 | import glob
7 | import json
8 | import os
9 | import os.path as osp
10 |
11 | import lxml.builder
12 | import lxml.etree
13 | import numpy as np
14 | import PIL.Image
15 |
16 | import labelme
17 |
18 |
def main():
    """Convert labelme rectangle annotations into a Pascal-VOC-style
    detection dataset (JPEGImages/, Annotations/ XML, visualizations).

    Non-rectangle shapes in the input are skipped with a message.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('labels_file')
    parser.add_argument('in_dir', help='input dir with annotated files')
    parser.add_argument('out_dir', help='output dataset directory')
    args = parser.parse_args()

    if osp.exists(args.out_dir):
        print('Output directory already exists:', args.out_dir)
        # quit() is only injected by the site module and may be absent
        # (e.g. under `python -S`); SystemExit is always available.
        raise SystemExit(1)
    os.makedirs(args.out_dir)
    os.makedirs(osp.join(args.out_dir, 'JPEGImages'))
    os.makedirs(osp.join(args.out_dir, 'Annotations'))
    os.makedirs(osp.join(args.out_dir, 'AnnotationsVisualization'))
    print('Creating dataset:', args.out_dir)

    # Build class list; id -1 must be '__ignore__' and id 0 '_background_'.
    class_names = []
    class_name_to_id = {}
    with open(args.labels_file) as f:  # fix: file handle was leaked via bare open()
        for i, line in enumerate(f):
            class_id = i - 1  # starts with -1
            class_name = line.strip()
            class_name_to_id[class_name] = class_id
            if class_id == -1:
                assert class_name == '__ignore__'
                continue
            elif class_id == 0:
                assert class_name == '_background_'
            class_names.append(class_name)
    class_names = tuple(class_names)
    print('class_names:', class_names)
    out_class_names_file = osp.join(args.out_dir, 'class_names.txt')
    with open(out_class_names_file, 'w') as f:
        f.writelines('\n'.join(class_names))
    print('Saved class_names:', out_class_names_file)

    for label_file in glob.glob(osp.join(args.in_dir, '*.json')):
        print('Generating dataset from:', label_file)
        # Load the annotation first; the file is only needed for json.load.
        with open(label_file) as f:
            data = json.load(f)

        base = osp.splitext(osp.basename(label_file))[0]
        out_img_file = osp.join(
            args.out_dir, 'JPEGImages', base + '.jpg')
        out_xml_file = osp.join(
            args.out_dir, 'Annotations', base + '.xml')
        out_viz_file = osp.join(
            args.out_dir, 'AnnotationsVisualization', base + '.jpg')

        # Copy the referenced image into the dataset.
        img_file = osp.join(osp.dirname(label_file), data['imagePath'])
        img = np.asarray(PIL.Image.open(img_file))
        PIL.Image.fromarray(img).save(out_img_file)

        # Pascal VOC annotation XML skeleton.
        maker = lxml.builder.ElementMaker()
        xml = maker.annotation(
            maker.folder(),
            maker.filename(base + '.jpg'),
            maker.database(),    # e.g., The VOC2007 Database
            maker.annotation(),  # e.g., Pascal VOC2007
            maker.image(),       # e.g., flickr
            maker.size(
                maker.height(str(img.shape[0])),
                maker.width(str(img.shape[1])),
                maker.depth(str(img.shape[2])),
            ),
            maker.segmented(),
        )

        bboxes = []
        labels = []
        for shape in data['shapes']:
            # Only rectangles can be converted to VOC bounding boxes.
            if shape['shape_type'] != 'rectangle':
                print('Skipping shape: label={label}, shape_type={shape_type}'
                      .format(**shape))
                continue

            class_name = shape['label']
            class_id = class_names.index(class_name)

            (xmin, ymin), (xmax, ymax) = shape['points']
            bboxes.append([xmin, ymin, xmax, ymax])
            labels.append(class_id)

            xml.append(
                maker.object(
                    maker.name(shape['label']),
                    maker.pose(),
                    maker.truncated(),
                    maker.difficult(),
                    maker.bndbox(
                        maker.xmin(str(xmin)),
                        maker.ymin(str(ymin)),
                        maker.xmax(str(xmax)),
                        maker.ymax(str(ymax)),
                    ),
                )
            )

        captions = [class_names[l] for l in labels]
        viz = labelme.utils.draw_instances(
            img, bboxes, labels, captions=captions
        )
        PIL.Image.fromarray(viz).save(out_viz_file)

        with open(out_xml_file, 'wb') as f:
            f.write(lxml.etree.tostring(xml, pretty_print=True))
124 |
125 |
# Allow running this converter directly as a script.
if __name__ == '__main__':
    main()
128 |
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_annotated/2011_000025.json:
--------------------------------------------------------------------------------
1 | {
2 | "shapes": [
3 | {
4 | "label": "bus",
5 | "line_color": null,
6 | "fill_color": null,
7 | "points": [
8 | [
9 | 260.936170212766,
10 | 22.563829787234056
11 | ],
12 | [
13 | 193.936170212766,
14 | 19.563829787234056
15 | ],
16 | [
17 | 124.93617021276599,
18 | 39.563829787234056
19 | ],
20 | [
21 | 89.93617021276599,
22 | 101.56382978723406
23 | ],
24 | [
25 | 81.93617021276599,
26 | 150.56382978723406
27 | ],
28 | [
29 | 108.93617021276599,
30 | 145.56382978723406
31 | ],
32 | [
33 | 88.93617021276599,
34 | 244.56382978723406
35 | ],
36 | [
37 | 89.93617021276599,
38 | 322.56382978723406
39 | ],
40 | [
41 | 116.93617021276599,
42 | 367.56382978723406
43 | ],
44 | [
45 | 158.936170212766,
46 | 368.56382978723406
47 | ],
48 | [
49 | 165.936170212766,
50 | 337.56382978723406
51 | ],
52 | [
53 | 347.936170212766,
54 | 335.56382978723406
55 | ],
56 | [
57 | 349.936170212766,
58 | 369.56382978723406
59 | ],
60 | [
61 | 391.936170212766,
62 | 373.56382978723406
63 | ],
64 | [
65 | 403.936170212766,
66 | 335.56382978723406
67 | ],
68 | [
69 | 425.936170212766,
70 | 332.56382978723406
71 | ],
72 | [
73 | 421.936170212766,
74 | 281.56382978723406
75 | ],
76 | [
77 | 428.936170212766,
78 | 252.56382978723406
79 | ],
80 | [
81 | 428.936170212766,
82 | 236.56382978723406
83 | ],
84 | [
85 | 409.936170212766,
86 | 220.56382978723406
87 | ],
88 | [
89 | 409.936170212766,
90 | 150.56382978723406
91 | ],
92 | [
93 | 430.936170212766,
94 | 143.56382978723406
95 | ],
96 | [
97 | 433.936170212766,
98 | 112.56382978723406
99 | ],
100 | [
101 | 431.936170212766,
102 | 96.56382978723406
103 | ],
104 | [
105 | 408.936170212766,
106 | 90.56382978723406
107 | ],
108 | [
109 | 395.936170212766,
110 | 50.563829787234056
111 | ],
112 | [
113 | 338.936170212766,
114 | 25.563829787234056
115 | ]
116 | ]
117 | },
118 | {
119 | "label": "bus",
120 | "line_color": null,
121 | "fill_color": null,
122 | "points": [
123 | [
124 | 88.93617021276599,
125 | 115.56382978723406
126 | ],
127 | [
128 | 0.9361702127659877,
129 | 96.56382978723406
130 | ],
131 | [
132 | 0.0,
133 | 251.968085106388
134 | ],
135 | [
136 | 0.9361702127659877,
137 | 265.56382978723406
138 | ],
139 | [
140 | 27.936170212765987,
141 | 265.56382978723406
142 | ],
143 | [
144 | 29.936170212765987,
145 | 283.56382978723406
146 | ],
147 | [
148 | 63.93617021276599,
149 | 281.56382978723406
150 | ],
151 | [
152 | 89.93617021276599,
153 | 252.56382978723406
154 | ],
155 | [
156 | 100.93617021276599,
157 | 183.56382978723406
158 | ],
159 | [
160 | 108.93617021276599,
161 | 145.56382978723406
162 | ],
163 | [
164 | 81.93617021276599,
165 | 151.56382978723406
166 | ]
167 | ]
168 | },
169 | {
170 | "label": "car",
171 | "line_color": null,
172 | "fill_color": null,
173 | "points": [
174 | [
175 | 413.936170212766,
176 | 168.56382978723406
177 | ],
178 | [
179 | 497.936170212766,
180 | 168.56382978723406
181 | ],
182 | [
183 | 497.936170212766,
184 | 256.56382978723406
185 | ],
186 | [
187 | 431.936170212766,
188 | 258.56382978723406
189 | ],
190 | [
191 | 430.936170212766,
192 | 236.56382978723406
193 | ],
194 | [
195 | 408.936170212766,
196 | 218.56382978723406
197 | ]
198 | ]
199 | }
200 | ],
201 | "lineColor": [
202 | 0,
203 | 255,
204 | 0,
205 | 128
206 | ],
207 | "fillColor": [
208 | 255,
209 | 0,
210 | 0,
211 | 128
212 | ],
213 | "imagePath": "2011_000025.jpg",
214 | "imageData": null
215 | }
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_annotated/2011_000025.json:
--------------------------------------------------------------------------------
1 | {
2 | "imageData": null,
3 | "shapes": [
4 | {
5 | "fill_color": null,
6 | "line_color": null,
7 | "label": "bus-1",
8 | "points": [
9 | [
10 | 260.936170212766,
11 | 22.563829787234056
12 | ],
13 | [
14 | 193.936170212766,
15 | 19.563829787234056
16 | ],
17 | [
18 | 124.93617021276599,
19 | 39.563829787234056
20 | ],
21 | [
22 | 89.93617021276599,
23 | 101.56382978723406
24 | ],
25 | [
26 | 81.93617021276599,
27 | 150.56382978723406
28 | ],
29 | [
30 | 108.93617021276599,
31 | 145.56382978723406
32 | ],
33 | [
34 | 88.93617021276599,
35 | 244.56382978723406
36 | ],
37 | [
38 | 89.93617021276599,
39 | 322.56382978723406
40 | ],
41 | [
42 | 116.93617021276599,
43 | 367.56382978723406
44 | ],
45 | [
46 | 158.936170212766,
47 | 368.56382978723406
48 | ],
49 | [
50 | 165.936170212766,
51 | 337.56382978723406
52 | ],
53 | [
54 | 347.936170212766,
55 | 335.56382978723406
56 | ],
57 | [
58 | 349.936170212766,
59 | 369.56382978723406
60 | ],
61 | [
62 | 391.936170212766,
63 | 373.56382978723406
64 | ],
65 | [
66 | 403.936170212766,
67 | 335.56382978723406
68 | ],
69 | [
70 | 425.936170212766,
71 | 332.56382978723406
72 | ],
73 | [
74 | 421.936170212766,
75 | 281.56382978723406
76 | ],
77 | [
78 | 428.936170212766,
79 | 252.56382978723406
80 | ],
81 | [
82 | 428.936170212766,
83 | 236.56382978723406
84 | ],
85 | [
86 | 409.936170212766,
87 | 220.56382978723406
88 | ],
89 | [
90 | 409.936170212766,
91 | 150.56382978723406
92 | ],
93 | [
94 | 430.936170212766,
95 | 143.56382978723406
96 | ],
97 | [
98 | 433.936170212766,
99 | 112.56382978723406
100 | ],
101 | [
102 | 431.936170212766,
103 | 96.56382978723406
104 | ],
105 | [
106 | 408.936170212766,
107 | 90.56382978723406
108 | ],
109 | [
110 | 395.936170212766,
111 | 50.563829787234056
112 | ],
113 | [
114 | 338.936170212766,
115 | 25.563829787234056
116 | ]
117 | ]
118 | },
119 | {
120 | "fill_color": null,
121 | "line_color": null,
122 | "label": "bus-2",
123 | "points": [
124 | [
125 | 88.93617021276599,
126 | 115.56382978723406
127 | ],
128 | [
129 | 0.9361702127659877,
130 | 96.56382978723406
131 | ],
132 | [
133 | 0.0,
134 | 251.968085106388
135 | ],
136 | [
137 | 0.9361702127659877,
138 | 265.56382978723406
139 | ],
140 | [
141 | 27.936170212765987,
142 | 265.56382978723406
143 | ],
144 | [
145 | 29.936170212765987,
146 | 283.56382978723406
147 | ],
148 | [
149 | 63.93617021276599,
150 | 281.56382978723406
151 | ],
152 | [
153 | 89.93617021276599,
154 | 252.56382978723406
155 | ],
156 | [
157 | 100.93617021276599,
158 | 183.56382978723406
159 | ],
160 | [
161 | 108.93617021276599,
162 | 145.56382978723406
163 | ],
164 | [
165 | 81.93617021276599,
166 | 151.56382978723406
167 | ]
168 | ]
169 | },
170 | {
171 | "fill_color": null,
172 | "line_color": null,
173 | "label": "car",
174 | "points": [
175 | [
176 | 413.936170212766,
177 | 168.56382978723406
178 | ],
179 | [
180 | 497.936170212766,
181 | 168.56382978723406
182 | ],
183 | [
184 | 497.936170212766,
185 | 256.56382978723406
186 | ],
187 | [
188 | 431.936170212766,
189 | 258.56382978723406
190 | ],
191 | [
192 | 430.936170212766,
193 | 236.56382978723406
194 | ],
195 | [
196 | 408.936170212766,
197 | 218.56382978723406
198 | ]
199 | ]
200 | }
201 | ],
202 | "fillColor": [
203 | 255,
204 | 0,
205 | 0,
206 | 128
207 | ],
208 | "lineColor": [
209 | 0,
210 | 255,
211 | 0,
212 | 128
213 | ],
214 | "imagePath": "2011_000025.jpg"
215 | }
--------------------------------------------------------------------------------
/labelme/main.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import codecs
3 | import os
4 | import sys
5 |
6 | from qtpy import QtWidgets
7 |
8 | from labelme import __appname__
9 | from labelme import __version__
10 | from labelme.app import MainWindow
11 | from labelme.config import get_config
12 | from labelme import logger
13 | from labelme.utils import newIcon
14 |
15 |
def main():
    """Entry point of the labelme application.

    Parses command-line options, merges them with the user config file
    via get_config(), and starts the Qt main window.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--version', '-V', action='store_true', help='show version'
    )
    parser.add_argument(
        '--reset-config', action='store_true', help='reset qt config'
    )
    parser.add_argument('filename', nargs='?', help='image or label filename')
    parser.add_argument(
        '--output',
        '-O',
        '-o',
        help='output file or directory (if it ends with .json it is '
             'recognized as file, else as directory)'
    )
    default_config_file = os.path.join(os.path.expanduser('~'), '.labelmerc')
    parser.add_argument(
        '--config',
        dest='config_file',
        help='config file (default: %s)' % default_config_file,
        default=default_config_file,
    )
    # config for the gui
    # NOTE: default=argparse.SUPPRESS keeps unset options out of
    # args.__dict__, so get_config() can distinguish "not given on the
    # command line" from an explicitly passed value.
    parser.add_argument(
        '--nodata',
        dest='store_data',
        action='store_false',
        help='stop storing image data to JSON file',
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--autosave',
        dest='auto_save',
        action='store_true',
        help='auto save',
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--nosortlabels',
        dest='sort_labels',
        action='store_false',
        help='stop sorting labels',
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--flags',
        help='comma separated list of flags OR file containing flags',
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--labels',
        help='comma separated list of labels OR file containing labels',
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--validatelabel',
        dest='validate_label',
        choices=['exact', 'instance'],
        help='label validation types',
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--keep-prev',
        action='store_true',
        help='keep annotation of previous frame',
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--epsilon',
        type=float,
        help='epsilon to find nearest vertex on canvas',
        default=argparse.SUPPRESS,
    )
    args = parser.parse_args()

    if args.version:
        print('{0} {1}'.format(__appname__, __version__))
        sys.exit(0)

    # --flags accepts either a file (one flag per line) or a
    # comma-separated list.
    if hasattr(args, 'flags'):
        if os.path.isfile(args.flags):
            with codecs.open(args.flags, 'r', encoding='utf-8') as f:
                args.flags = [l.strip() for l in f if l.strip()]
        else:
            args.flags = [l for l in args.flags.split(',') if l]

    # --labels accepts either a file (one label per line) or a
    # comma-separated list.
    if hasattr(args, 'labels'):
        if os.path.isfile(args.labels):
            with codecs.open(args.labels, 'r', encoding='utf-8') as f:
                args.labels = [l.strip() for l in f if l.strip()]
        else:
            args.labels = [l for l in args.labels.split(',') if l]

    # Pop the non-config options; whatever remains in args.__dict__ is
    # merged with the config file by get_config().
    config_from_args = args.__dict__
    config_from_args.pop('version')
    reset_config = config_from_args.pop('reset_config')
    filename = config_from_args.pop('filename')
    output = config_from_args.pop('output')
    config_file = config_from_args.pop('config_file')
    config = get_config(config_from_args, config_file)

    if not config['labels'] and config['validate_label']:
        logger.error('--labels must be specified with --validatelabel or '
                     'validate_label: true in the config file '
                     '(ex. ~/.labelmerc).')
        sys.exit(1)

    # An output ending in .json is a single label file; anything else is
    # treated as an output directory.
    output_file = None
    output_dir = None
    if output is not None:
        if output.endswith('.json'):
            output_file = output
        else:
            output_dir = output

    app = QtWidgets.QApplication(sys.argv)
    app.setApplicationName(__appname__)
    app.setWindowIcon(newIcon('icon'))
    win = MainWindow(
        config=config,
        filename=filename,
        output_file=output_file,
        output_dir=output_dir,
    )

    # --reset-config needs the window's QSettings, so it must run after
    # MainWindow construction; it exits without showing the GUI.
    if reset_config:
        print('Resetting Qt config: %s' % win.settings.fileName())
        win.settings.clear()
        sys.exit(0)

    win.show()
    win.raise_()
    sys.exit(app.exec_())
150 |
151 |
# Allow running the application module directly.
if __name__ == '__main__':
    main()
154 |
--------------------------------------------------------------------------------
/labelme/label_file.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import json
3 | import os.path
4 |
5 | from . import logger
6 | from . import PY2
7 | from . import utils
8 | from ._version import __version__
9 |
10 |
class LabelFileError(Exception):
    """Raised when a label file cannot be loaded or saved."""
13 |
14 |
class LabelFile(object):
    """Reader/writer for labelme's JSON annotation (label) files."""

    # File extension recognized as a label file (see isLabelFile).
    suffix = '.json'

    def __init__(self, filename=None):
        """Initialize empty state; if *filename* is given, load it."""
        self.shapes = ()
        self.imagePath = None
        self.imageData = None
        if filename is not None:
            self.load(filename)
        self.filename = filename

    def load(self, filename):
        """Load shapes, image data and metadata from a JSON label file.

        Raises:
            LabelFileError: if the label file or the referenced image
                cannot be read or parsed.
        """
        # Keys handled explicitly below; any other key is preserved
        # verbatim in self.otherData so that saving does not lose data.
        keys = [
            'imageData',
            'imagePath',
            'lineColor',
            'fillColor',
            'shapes',  # polygonal annotations
            'flags',  # image level flags
            'imageHeight',
            'imageWidth',
        ]
        try:
            with open(filename, 'rb' if PY2 else 'r') as f:
                data = json.load(f)
            if data['imageData'] is not None:
                imageData = base64.b64decode(data['imageData'])
            else:
                # relative path from label file to relative path from cwd
                imagePath = os.path.join(os.path.dirname(filename),
                                         data['imagePath'])
                with open(imagePath, 'rb') as f:
                    imageData = f.read()
            flags = data.get('flags')
            imagePath = data['imagePath']
            # NOTE(review): the corrected height/width returned here are
            # discarded; the call is made only for its warning side effect.
            self._check_image_height_and_width(
                base64.b64encode(imageData).decode('utf-8'),
                data.get('imageHeight'),
                data.get('imageWidth'),
            )
            lineColor = data['lineColor']
            fillColor = data['fillColor']
            # Generator of (label, points, line_color, fill_color,
            # shape_type) tuples; shape_type defaults to 'polygon' for
            # files written before the field existed.
            shapes = (
                (
                    s['label'],
                    s['points'],
                    s['line_color'],
                    s['fill_color'],
                    s.get('shape_type', 'polygon'),
                )
                for s in data['shapes']
            )
        except Exception as e:
            raise LabelFileError(e)

        otherData = {}
        for key, value in data.items():
            if key not in keys:
                otherData[key] = value

        # Only replace data after everything is loaded.
        self.flags = flags
        self.shapes = shapes
        self.imagePath = imagePath
        self.imageData = imageData
        self.lineColor = lineColor
        self.fillColor = fillColor
        self.filename = filename
        self.otherData = otherData

    @staticmethod
    def _check_image_height_and_width(imageData, imageHeight, imageWidth):
        """Validate stored height/width against the decoded image.

        Returns the (possibly corrected) ``(imageHeight, imageWidth)``;
        mismatches are logged and replaced with the actual values.
        """
        img_arr = utils.img_b64_to_arr(imageData)
        if imageHeight is not None and img_arr.shape[0] != imageHeight:
            logger.error(
                'imageHeight does not match with imageData or imagePath, '
                'so getting imageHeight from actual image.'
            )
            imageHeight = img_arr.shape[0]
        if imageWidth is not None and img_arr.shape[1] != imageWidth:
            logger.error(
                'imageWidth does not match with imageData or imagePath, '
                'so getting imageWidth from actual image.'
            )
            imageWidth = img_arr.shape[1]
        return imageHeight, imageWidth

    def save(
        self,
        filename,
        shapes,
        imagePath,
        imageHeight,
        imageWidth,
        imageData=None,
        lineColor=None,
        fillColor=None,
        otherData=None,
        flags=None,
    ):
        """Serialize the annotation to *filename* as JSON.

        Raises:
            LabelFileError: if the file cannot be written.
        """
        if imageData is not None:
            imageData = base64.b64encode(imageData).decode('utf-8')
            imageHeight, imageWidth = self._check_image_height_and_width(
                imageData, imageHeight, imageWidth
            )
        if otherData is None:
            otherData = {}
        if flags is None:
            flags = {}
        data = dict(
            version=__version__,
            flags=flags,
            shapes=shapes,
            lineColor=lineColor,
            fillColor=fillColor,
            imagePath=imagePath,
            imageData=imageData,
            imageHeight=imageHeight,
            imageWidth=imageWidth,
        )
        # Round-trip any unknown keys collected by load().
        for key, value in otherData.items():
            data[key] = value
        try:
            with open(filename, 'wb' if PY2 else 'w') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
            self.filename = filename
        except Exception as e:
            raise LabelFileError(e)

    @staticmethod
    def isLabelFile(filename):
        """Return True if *filename* has the label-file extension."""
        return os.path.splitext(filename)[1].lower() == LabelFile.suffix
148 |
--------------------------------------------------------------------------------
/labelme/utils/draw.py:
--------------------------------------------------------------------------------
1 | import io
2 | import os.path as osp
3 |
4 | import numpy as np
5 | import PIL.Image
6 | import PIL.ImageDraw
7 | import PIL.ImageFont
8 |
9 |
def label_colormap(N=256):
    """Return the PASCAL-VOC-style label colormap.

    N: number of entries; result is an (N, 3) float32 array of RGB
    values in [0, 1].
    """

    def _bit(value, pos):
        return (value >> pos) & 1

    cmap = np.zeros((N, 3))
    for label in range(N):
        value = label
        red = green = blue = 0
        # Spread the low bits of the label id across the high bits of
        # each channel, three bits per round.
        for shift in range(8):
            red |= _bit(value, 0) << (7 - shift)
            green |= _bit(value, 1) << (7 - shift)
            blue |= _bit(value, 2) << (7 - shift)
            value >>= 3
        cmap[label] = (red, green, blue)
    return cmap.astype(np.float32) / 255
29 |
30 |
31 | def _validate_colormap(colormap, n_labels):
32 | if colormap is None:
33 | colormap = label_colormap(n_labels)
34 | else:
35 | assert colormap.shape == (colormap.shape[0], 3), \
36 | 'colormap must be sequence of RGB values'
37 | assert 0 <= colormap.min() and colormap.max() <= 1, \
38 | 'colormap must ranges 0 to 1'
39 | return colormap
40 |
41 |
42 | # similar function as skimage.color.label2rgb
# similar function as skimage.color.label2rgb
def label2rgb(
    lbl, img=None, n_labels=None, alpha=0.5, thresh_suppress=0, colormap=None,
):
    """Colorize a label image, optionally blending it over *img* in gray."""
    if n_labels is None:
        n_labels = len(np.unique(lbl))

    cmap = _validate_colormap(colormap, n_labels)
    cmap = (cmap * 255).astype(np.uint8)

    viz = cmap[lbl]
    viz[lbl == -1] = (0, 0, 0)  # unlabeled

    if img is None:
        return viz

    # Blend the colorized labels over a grayscale copy of the image.
    gray = np.asarray(PIL.Image.fromarray(img).convert('LA').convert('RGB'))
    viz = (alpha * viz + (1 - alpha) * gray).astype(np.uint8)
    return viz
64 |
65 |
def draw_label(label, img=None, label_names=None, colormap=None, **kwargs):
    """Draw pixel-wise label with colorization and label names.

    label: ndarray, (H, W)
        Pixel-wise labels to colorize.
    img: ndarray, (H, W, 3), optional
        Image on which the colorized label will be drawn.
    label_names: iterable
        List of label names.

    Returns an RGB ndarray of the rendered figure, resized back to the
    label image's size.
    """
    import matplotlib.pyplot as plt

    # Render off-screen; the original backend is restored before returning.
    backend_org = plt.rcParams['backend']
    plt.switch_backend('agg')

    # Strip all margins/ticks so only the image itself is rendered.
    plt.subplots_adjust(left=0, right=1, top=1, bottom=0,
                        wspace=0, hspace=0)
    plt.margins(0, 0)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())

    if label_names is None:
        label_names = [str(l) for l in range(label.max() + 1)]

    colormap = _validate_colormap(colormap, len(label_names))

    label_viz = label2rgb(
        label, img, n_labels=len(label_names), colormap=colormap, **kwargs
    )
    plt.imshow(label_viz)
    plt.axis('off')

    # Legend with one patch per label value actually present in `label`.
    plt_handlers = []
    plt_titles = []
    for label_value, label_name in enumerate(label_names):
        if label_value not in label:
            continue
        fc = colormap[label_value]
        p = plt.Rectangle((0, 0), 1, 1, fc=fc)
        plt_handlers.append(p)
        plt_titles.append('{value}: {name}'
                          .format(value=label_value, name=label_name))
    plt.legend(plt_handlers, plt_titles, loc='lower right', framealpha=.5)

    # Rasterize via an in-memory PNG, then resize back to the label size.
    f = io.BytesIO()
    plt.savefig(f, bbox_inches='tight', pad_inches=0)
    plt.cla()
    plt.close()

    plt.switch_backend(backend_org)

    out_size = (label_viz.shape[1], label_viz.shape[0])
    out = PIL.Image.open(f).resize(out_size, PIL.Image.BILINEAR).convert('RGB')
    out = np.asarray(out)
    return out
121 |
122 |
def draw_instances(
    image=None,
    bboxes=None,
    labels=None,
    masks=None,
    captions=None,
):
    """Draw captioned bounding boxes on *image* and return the result."""
    import matplotlib

    # TODO(wkentaro)
    assert image is not None
    assert bboxes is not None
    assert labels is not None
    assert masks is None
    assert captions is not None

    canvas = PIL.Image.fromarray(image)
    painter = PIL.ImageDraw.ImageDraw(canvas)

    # Use a TrueType font bundled with matplotlib.
    ttf_path = osp.join(
        osp.dirname(matplotlib.__file__),
        'mpl-data/fonts/ttf/DejaVuSans.ttf'
    )
    font = PIL.ImageFont.truetype(ttf_path)

    cmap = label_colormap(255)
    for box, lbl, caption in zip(bboxes, labels, captions):
        rgb = tuple((cmap[lbl] * 255).astype(np.uint8).tolist())
        x1, y1, x2, y2 = box
        painter.rectangle((x1, y1, x2, y2), outline=rgb)
        painter.text((x1, y1), caption, font=font)

    return np.asarray(canvas)
158 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: generic
2 |
3 | cache:
4 | - pip
5 |
6 | sudo: required
7 |
8 | dist: trusty
9 |
10 | branches:
11 | only:
12 | - master
    - /v\d+\.\d+\.\d+/
14 |
15 | notifications:
16 | email: false
17 |
18 | env:
19 | global:
20 | # used by ci-helpers
21 | - SETUP_XVFB=true
22 | - PIP_DEPENDENCIES='hacking pytest pytest-qt'
23 |
24 | - MPLBACKEND=TkAgg # for osx
25 | matrix:
26 | include:
27 | - os: osx
28 | env:
29 | - PYTEST_QT_API=pyqt5
30 | - PYQT_PACKAGE='pyqt=5'
31 | - PYTHON_VERSION=3.6
32 | - RUN_PYINSTALLER=true
33 | - os: linux
34 | dist: trusty
35 | env:
36 | - PYTEST_QT_API=pyqt4v2
37 | - PYQT_PACKAGE='pyqt=4'
38 | - PYTHON_VERSION=2.7
39 | - os: linux
40 | dist: trusty
41 | env:
42 | - PYTEST_QT_API=pyside2
43 | - CONDA_CHANNELS='conda-forge'
44 | - PYQT_PACKAGE='pyside2'
45 | - PYTHON_VERSION=2.7
46 | - os: linux
47 | dist: trusty
48 | env:
49 | - PYTEST_QT_API=pyside2
50 | - CONDA_CHANNELS='conda-forge'
51 | - PYQT_PACKAGE='pyside2'
52 | - PYTHON_VERSION=3.6
53 | - os: linux
54 | dist: trusty
55 | env:
56 | - PYTEST_QT_API=pyqt5
57 | - PYQT_PACKAGE='pyqt=5'
58 | - PYTHON_VERSION=2.7
59 | - os: linux
60 | dist: trusty
61 | env:
62 | - PYTEST_QT_API=pyqt5
63 | - PYQT_PACKAGE='pyqt=5'
64 | - PYTHON_VERSION=3.6
65 | - RUN_PYINSTALLER=true
66 |
67 | install:
68 | # Setup X
69 | - |
70 | if [ $TRAVIS_OS_NAME = "linux" ]; then
71 | sudo apt-get update
72 | # Xvfb / window manager
73 | sudo apt-get install -y xvfb herbstluftwm
74 | elif [ $TRAVIS_OS_NAME = "osx" ]; then
75 | brew cask install xquartz
76 | fi
77 |
78 | # Setup miniconda
79 | - git clone --depth 1 git://github.com/astropy/ci-helpers.git
80 | - CONDA_DEPENDENCIES=$PYQT_PACKAGE source ci-helpers/travis/setup_conda.sh
81 | - source activate test && export LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH
82 | - pip install .
83 | - rm -rf ci-helpers miniconda.sh
84 |
85 | before_script:
86 | - if [ $TRAVIS_OS_NAME = "linux" ]; then (herbstluftwm )& fi
87 | - if [ $TRAVIS_OS_NAME = "osx" ]; then (sudo Xvfb :99 -ac -screen 0 1024x768x8 )& fi
88 | - sleep 1
89 |
90 | script:
91 | # Run flake8
92 | - flake8 examples labelme setup.py tests
93 |
94 | # Run pytest
95 | - pytest -v tests
96 |
97 | # Test labelme executable
98 | - labelme --help
99 | - labelme --version
100 | - (cd examples/primitives && labelme_json_to_dataset primitives.json && rm -rf primitives_json)
101 | - (cd examples/tutorial && rm -rf apc2016_obj3_json && labelme_json_to_dataset apc2016_obj3.json && python load_label_png.py && git checkout -- .)
102 | - (cd examples/bbox_detection && rm -rf data_dataset_voc && ./labelme2voc.py labels.txt data_annotated data_dataset_voc && git checkout -- .)
103 | - (cd examples/semantic_segmentation && rm -rf data_dataset_voc && ./labelme2voc.py labels.txt data_annotated data_dataset_voc && git checkout -- .)
104 | - (cd examples/instance_segmentation && rm -rf data_dataset_voc && ./labelme2voc.py labels.txt data_annotated data_dataset_voc && git checkout -- .)
105 | - (cd examples/video_annotation && rm -rf data_dataset_voc && ./labelme2voc.py labels.txt data_annotated data_dataset_voc && git checkout -- .)
106 |
107 | # Run pyinstaller
108 | - |
109 | if [ "$RUN_PYINSTALLER" = "true" ]; then
110 | # Cleanup
111 | pip uninstall -y $PIP_DEPENDENCIES
112 |
113 | # https://github.com/wkentaro/labelme/issues/183
114 | if [ $TRAVIS_OS_NAME = "osx" ]; then
115 | pip uninstall -y Pillow
116 | conda install -y Pillow
117 | fi
118 |
119 | # Build the standalone executable
120 | pip install pyinstaller
121 | pyinstaller labelme.spec
122 | dist/labelme --version
123 |
124 | # Create ZIP files for release
125 | cd dist
126 | zip labelme-$(uname).zip labelme
127 | if [ "$TRAVIS_OS_NAME" = "osx" ]; then
128 | zip -r labelme.app-$(uname).zip labelme.app
129 | fi
130 | cd -
131 | fi
132 |
133 | before_deploy:
134 | - git config --local user.name 'Kentaro Wada'
135 | - git config --local user.email 'www.kentaro.wada@gmail.com'
136 |
137 | deploy:
138 | provider: releases
139 | api_key:
140 | secure: S42xawauDsZqLpdRy5d3nAo5Enh6tcJR6IAKwRp4T5+P/ZgXmoSJ81HQrmiUXLJTdvMMjRRr2H/2GuX+WoNHiFLlGY43bWu8GkK1qRslJxC+Fh8+VdIrhVcOFf7xhvhuV26KZxu4uh2lp95p10Crpj4Y3GkxguHjbF+F9c0vEVlEmmgyVsj6c2tFzIXVa4LPuFFDf9yWPl5Sp1n60Y8fDaEOoVbK63vh8pngFE8t2anQl5oNOcLf29t8IciPyo70p1p0UdKZ5a6YsPgu+/x2rPG1uroUksD9s9kY33vL/2D77frGv1eZFNk7TH31LV1TlG3JWklC8tqVG7Qklm4+Hnk+OEo/1uyWeDLJX8IOwy25hbg5VMFkQTZYWhCYg3IihdHbRJL4FAyTasYbEUrkmxniRnLCD8EL6gFHV1zZuv7ACCcdJHZz70sp/eiKZ5TwGRM+qnSNjd9Y4IyRv25vRsZo/8EPKShcKD1ik5OQNc2hw3uM6KLnV6lRCmZ1WSVz08bkLn5X+lzAyw1Bop2ZNdlS9N8qSfRMn2f30ORCVmNP/EKFFIDbt4wkd1XkAG/bF4LFD9VeSTp6RXcie/C/SfwBjgn0hqhdQ/n6Ptcba77OXWnKAc35GkWPs3+axBrYMZno68ne8DlcWIdT2RAeuMVzQMHT52ddvx3oQ6cUXs4=
141 | file_glob: true
142 | file: dist/*.zip
143 | skip_cleanup: true
144 | on:
145 | tags: true
146 | repo: wkentaro/labelme
147 |
148 | after_script:
149 | - true # noop
150 |
--------------------------------------------------------------------------------
/labelme/widgets/label_dialog.py:
--------------------------------------------------------------------------------
1 | from qtpy import QT_VERSION
2 | from qtpy import QtCore
3 | from qtpy import QtGui
4 | from qtpy import QtWidgets
5 |
6 | QT5 = QT_VERSION[0] == '5' # NOQA
7 |
8 | from labelme import logger
9 | import labelme.utils
10 |
11 |
12 | # TODO(unknown):
13 | # - Calculate optimal position so as not to go out of screen area.
14 |
15 |
class LabelQLineEdit(QtWidgets.QLineEdit):
    """Line edit that forwards Up/Down key presses to an attached list widget.

    This lets the user scroll through the label history list with the
    arrow keys while the text field keeps keyboard focus.
    """

    def setListWidget(self, list_widget):
        # Keep a reference so arrow keys can drive the label list.
        self.list_widget = list_widget

    def keyPressEvent(self, e):
        # Up/Down navigate the attached list; all other keys edit text
        # as a normal QLineEdit would.
        if e.key() not in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down):
            super(LabelQLineEdit, self).keyPressEvent(e)
        else:
            self.list_widget.keyPressEvent(e)
26 |
27 |
class LabelDialog(QtWidgets.QDialog):
    """Modal dialog for entering or selecting a shape label.

    Combines a validated text field, an (optionally sorted) label history
    list, and a completer whose matching mode is configurable via
    ``completion`` ('startswith' or 'contains'; the latter requires Qt5).
    """

    def __init__(self, text="Enter object label", parent=None, labels=None,
                 sort_labels=True, show_text_field=True,
                 completion='startswith', fit_to_content=None):
        if fit_to_content is None:
            fit_to_content = {'row': False, 'column': True}
        self._fit_to_content = fit_to_content

        super(LabelDialog, self).__init__(parent)
        self.edit = LabelQLineEdit()
        self.edit.setPlaceholderText(text)
        self.edit.setValidator(labelme.utils.labelValidator())
        self.edit.editingFinished.connect(self.postProcess)
        layout = QtWidgets.QVBoxLayout()
        if show_text_field:
            layout.addWidget(self.edit)
        # buttons
        self.buttonBox = bb = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,
            QtCore.Qt.Horizontal,
            self,
        )
        bb.button(bb.Ok).setIcon(labelme.utils.newIcon('done'))
        bb.button(bb.Cancel).setIcon(labelme.utils.newIcon('undo'))
        bb.accepted.connect(self.validate)
        bb.rejected.connect(self.reject)
        layout.addWidget(bb)
        # label_list
        self.labelList = QtWidgets.QListWidget()
        if self._fit_to_content['row']:
            self.labelList.setHorizontalScrollBarPolicy(
                QtCore.Qt.ScrollBarAlwaysOff
            )
        if self._fit_to_content['column']:
            self.labelList.setVerticalScrollBarPolicy(
                QtCore.Qt.ScrollBarAlwaysOff
            )
        self._sort_labels = sort_labels
        if labels:
            self.labelList.addItems(labels)
        if self._sort_labels:
            self.labelList.sortItems()
        else:
            # Unsorted lists can be reordered manually by drag & drop.
            self.labelList.setDragDropMode(
                QtWidgets.QAbstractItemView.InternalMove)
        self.labelList.currentItemChanged.connect(self.labelSelected)
        self.edit.setListWidget(self.labelList)
        layout.addWidget(self.labelList)
        self.setLayout(layout)
        # completion
        completer = QtWidgets.QCompleter()
        if not QT5 and completion != 'startswith':
            # QCompleter.setFilterMode only exists in Qt5, so 'contains'
            # completion is unavailable on Qt4; fall back gracefully.
            # NOTE: logger.warning instead of the deprecated logger.warn.
            logger.warning(
                "completion other than 'startswith' is only "
                "supported with Qt5. Using 'startswith'"
            )
            completion = 'startswith'
        if completion == 'startswith':
            completer.setCompletionMode(QtWidgets.QCompleter.InlineCompletion)
            # Default settings.
            # completer.setFilterMode(QtCore.Qt.MatchStartsWith)
        elif completion == 'contains':
            completer.setCompletionMode(QtWidgets.QCompleter.PopupCompletion)
            completer.setFilterMode(QtCore.Qt.MatchContains)
        else:
            raise ValueError('Unsupported completion: {}'.format(completion))
        completer.setModel(self.labelList.model())
        self.edit.setCompleter(completer)

    def addLabelHistory(self, label):
        """Add *label* to the history list unless it is already present."""
        if self.labelList.findItems(label, QtCore.Qt.MatchExactly):
            return
        self.labelList.addItem(label)
        if self._sort_labels:
            self.labelList.sortItems()

    def labelSelected(self, item):
        # Mirror the list selection into the text field.
        self.edit.setText(item.text())

    def _trimmedText(self):
        """Return the edit text stripped of surrounding whitespace.

        Handles both Python str (PyQt API v2 / PySide) and QString
        (PyQt API v1), which spell "strip" differently.
        """
        text = self.edit.text()
        if hasattr(text, 'strip'):
            text = text.strip()
        else:
            text = text.trimmed()
        return text

    def validate(self):
        """Accept the dialog only when a non-empty label was entered."""
        if self._trimmedText():
            self.accept()

    def postProcess(self):
        """Normalize the entered label by stripping surrounding whitespace."""
        self.edit.setText(self._trimmedText())

    def popUp(self, text=None, move=True):
        """Show the dialog and return the entered label, or None on cancel.

        If *text* is None the previously entered label is kept; if *move*
        is True the dialog is moved to the current cursor position.
        """
        if self._fit_to_content['row']:
            self.labelList.setMinimumHeight(
                self.labelList.sizeHintForRow(0) * self.labelList.count() + 2
            )
        if self._fit_to_content['column']:
            self.labelList.setMinimumWidth(
                self.labelList.sizeHintForColumn(0) + 2
            )
        # if text is None, the previous label in self.edit is kept
        if text is None:
            text = self.edit.text()
        self.edit.setText(text)
        self.edit.setSelection(0, len(text))
        items = self.labelList.findItems(text, QtCore.Qt.MatchFixedString)
        if items:
            assert len(items) == 1
            self.labelList.setCurrentItem(items[0])
            row = self.labelList.row(items[0])
            self.edit.completer().setCurrentRow(row)
        self.edit.setFocus(QtCore.Qt.PopupFocusReason)
        if move:
            self.move(QtGui.QCursor.pos())
        return self.edit.text() if self.exec_() else None
149 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # labelme: Image Polygonal Annotation with Python
4 |
5 | [](https://pypi.python.org/pypi/labelme)
6 | [](https://pypi.org/project/labelme)
7 | [](https://travis-ci.org/wkentaro/labelme)
8 | [](https://hub.docker.com/r/wkentaro/labelme)
9 |
10 |
11 | Labelme is a graphical image annotation tool inspired by <http://labelme.csail.mit.edu>.
12 | It is written in Python and uses Qt for its graphical interface.
13 |
14 |
15 | Fig 1. Annotation example of instance segmentation.
16 |
17 |
18 | Fig 2. VOC dataset example of instance segmentation.
19 |
20 |
21 | Fig 3. Other examples (semantic segmentation, bbox detection, and classification).
22 |
23 |
24 | Fig 4. Various primitives (polygon, rectangle, circle, line, and point).
25 |
26 |
27 | ## Features
28 |
29 | - [x] Image annotation for polygon, rectangle, circle, line and point. ([tutorial](examples/tutorial))
30 | - [x] Image flag annotation for classification and cleaning. ([#166](https://github.com/wkentaro/labelme/pull/166))
31 | - [x] Video annotation. ([video annotation](examples/video_annotation))
32 | - [x] GUI customization (predefined labels / flags, auto-saving, label validation, etc). ([#144](https://github.com/wkentaro/labelme/pull/144))
33 | - [x] Exporting VOC-like dataset for semantic/instance segmentation. ([semantic segmentation](examples/semantic_segmentation), [instance segmentation](examples/instance_segmentation))
34 |
35 |
36 |
37 | ## Requirements
38 |
39 | - Ubuntu / macOS / Windows
40 | - Python2 / Python3
41 | - [PyQt4 / PyQt5](http://www.riverbankcomputing.co.uk/software/pyqt/intro) / [PySide2](https://wiki.qt.io/PySide2_GettingStarted)
42 |
43 |
44 | ## Installation
45 |
46 | There are options:
47 |
48 | - Platform agnostic installation: [Anaconda](#anaconda), [Docker](#docker)
49 | - Platform specific installation: [Ubuntu](#ubuntu), [macOS](#macos), [Windows](#windows)
50 |
51 | ### Anaconda
52 |
53 | You need to install [Anaconda](https://www.continuum.io/downloads); then run the commands below:
54 |
55 | ```bash
56 | # python2
57 | conda create --name=labelme python=2.7
58 | source activate labelme
59 | # conda install -c conda-forge pyside2
60 | conda install pyqt
61 | pip install labelme
62 | # if you'd like to use the latest version, run below:
63 | # pip install git+https://github.com/wkentaro/labelme.git
64 |
65 | # python3
66 | conda create --name=labelme python=3.6
67 | source activate labelme
68 | # conda install -c conda-forge pyside2
69 | # conda install pyqt
70 | pip install pyqt5 # pyqt5 can be installed via pip on python3
71 | pip install labelme
72 | ```
73 |
74 | ### Docker
75 |
76 | You need to install [docker](https://www.docker.com); then run the commands below:
77 |
78 | ```bash
79 | wget https://raw.githubusercontent.com/wkentaro/labelme/master/labelme/cli/on_docker.py -O labelme_on_docker
80 | chmod u+x labelme_on_docker
81 |
82 | # Maybe you need http://sourabhbajaj.com/blog/2017/02/07/gui-applications-docker-mac/ on macOS
83 | ./labelme_on_docker examples/tutorial/apc2016_obj3.jpg -O examples/tutorial/apc2016_obj3.json
84 | ./labelme_on_docker examples/semantic_segmentation/data_annotated
85 | ```
86 |
87 | ### Ubuntu
88 |
89 | ```bash
90 | # Ubuntu 14.04 / Ubuntu 16.04
91 | # Python2
92 | # sudo apt-get install python-qt4 # PyQt4
93 | sudo apt-get install python-pyqt5 # PyQt5
94 | sudo pip install labelme
95 | # Python3
96 | sudo apt-get install python3-pyqt5 # PyQt5
97 | sudo pip3 install labelme
98 | ```
99 |
100 | ### macOS
101 |
102 | ```bash
103 | # macOS Sierra
104 | brew install pyqt # maybe pyqt5
105 | pip install labelme # both python2/3 should work
106 |
107 | # or install standalone executable / app
108 | brew install wkentaro/labelme/labelme
109 | brew cask install wkentaro/labelme/labelme
110 | ```
111 |
112 | ### Windows
113 |
114 | Firstly, follow instruction in [Anaconda](#anaconda).
115 |
116 | ```bash
117 | # Pillow 5 causes dll load error on Windows.
118 | # https://github.com/wkentaro/labelme/pull/174
119 | conda install pillow=4.0.0
120 | ```
121 |
122 |
123 | ## Usage
124 |
125 | Run `labelme --help` for detail.
126 | The annotations are saved as a [JSON](http://www.json.org/) file.
127 |
128 | ```bash
129 | labelme # just open gui
130 |
131 | # tutorial (single image example)
132 | cd examples/tutorial
133 | labelme apc2016_obj3.jpg # specify image file
134 | labelme apc2016_obj3.jpg -O apc2016_obj3.json # close window after the save
135 | labelme apc2016_obj3.jpg --nodata # not include image data but relative image path in JSON file
136 | labelme apc2016_obj3.jpg \
137 | --labels highland_6539_self_stick_notes,mead_index_cards,kong_air_dog_squeakair_tennis_ball # specify label list
138 |
139 | # semantic segmentation example
140 | cd examples/semantic_segmentation
141 | labelme data_annotated/ # Open directory to annotate all images in it
142 | labelme data_annotated/ --labels labels.txt # specify label list with a file
143 | ```
144 |
145 | For more advanced usage, please refer to the examples:
146 |
147 | * [Tutorial (Single Image Example)](examples/tutorial)
148 | * [Semantic Segmentation Example](examples/semantic_segmentation)
149 | * [Instance Segmentation Example](examples/instance_segmentation)
150 | * [Video Annotation Example](examples/video_annotation)
151 |
152 |
153 | ## FAQ
154 |
155 | - **How to convert JSON file to numpy array?** See [examples/tutorial](examples/tutorial#convert-to-dataset).
156 | - **How to load label PNG file?** See [examples/tutorial](examples/tutorial#how-to-load-label-png-file).
157 | - **How to get annotations for semantic segmentation?** See [examples/semantic_segmentation](examples/semantic_segmentation).
158 | - **How to get annotations for instance segmentation?** See [examples/instance_segmentation](examples/instance_segmentation).
159 |
160 |
161 | ## Screencast
162 |
163 |
164 |
165 |
166 | ## Testing
167 |
168 | ```bash
169 | pip install hacking pytest pytest-qt
170 | flake8 .
171 | pytest -v tests
172 | ```
173 |
174 |
175 | ## Developing
176 |
177 | ```bash
178 | git clone https://github.com/wkentaro/labelme.git
179 | cd labelme
180 |
181 | # Install anaconda3 and labelme
182 | curl -L https://github.com/wkentaro/dotfiles/raw/master/local/bin/install_anaconda3.sh | bash -s .
183 | source .anaconda3/bin/activate
184 | pip install -e .
185 | ```
186 |
187 |
188 | ## How to build standalone executable
189 |
190 | Below shows how to build the standalone executable on macOS, Linux and Windows.
191 | Also, there are pre-built executables in
192 | [the release section](https://github.com/wkentaro/labelme/releases).
193 |
194 | ```bash
195 | # Setup conda
196 | conda create --name labelme python==3.6.0
197 | conda activate labelme
198 |
199 | # Build the standalone executable
200 | pip install .
201 | pip install pyinstaller
202 | pyinstaller labelme.spec
203 | dist/labelme --version
204 | ```
205 |
206 |
207 | ## Acknowledgement
208 |
209 | This repo is the fork of [mpitid/pylabelme](https://github.com/mpitid/pylabelme),
210 | whose development has already stopped.
211 |
--------------------------------------------------------------------------------
/labelme/shape.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import math
3 |
4 | from qtpy import QtCore
5 | from qtpy import QtGui
6 |
7 | import labelme.utils
8 |
9 |
10 | # TODO(unknown):
11 | # - [opt] Store paths instead of creating new ones at each paint.
12 |
13 |
DEFAULT_LINE_COLOR = QtGui.QColor(0, 255, 0, 128)
DEFAULT_FILL_COLOR = QtGui.QColor(255, 0, 0, 128)
DEFAULT_SELECT_LINE_COLOR = QtGui.QColor(255, 255, 255)
DEFAULT_SELECT_FILL_COLOR = QtGui.QColor(0, 128, 255, 155)
DEFAULT_VERTEX_FILL_COLOR = QtGui.QColor(0, 255, 0, 255)
DEFAULT_HVERTEX_FILL_COLOR = QtGui.QColor(255, 0, 0)


class Shape(object):
    """A drawable annotation shape (polygon, rectangle, circle, line,
    point, or linestrip) made of a list of QPointF vertices.

    Class attributes hold the shared drawing defaults; instances may
    override ``line_color`` to draw, e.g., a pending line differently.
    """

    # Vertex marker shapes.
    P_SQUARE, P_ROUND = 0, 1

    # Vertex highlight modes.
    MOVE_VERTEX, NEAR_VERTEX = 0, 1

    # The following class variables influence the drawing of all shape objects.
    line_color = DEFAULT_LINE_COLOR
    fill_color = DEFAULT_FILL_COLOR
    select_line_color = DEFAULT_SELECT_LINE_COLOR
    select_fill_color = DEFAULT_SELECT_FILL_COLOR
    vertex_fill_color = DEFAULT_VERTEX_FILL_COLOR
    hvertex_fill_color = DEFAULT_HVERTEX_FILL_COLOR
    point_type = P_ROUND
    point_size = 8
    scale = 1.0

    def __init__(self, label=None, line_color=None, shape_type=None):
        self.label = label
        self.points = []
        self.fill = False
        self.selected = False
        # Goes through the shape_type property setter, which validates
        # the value and maps None to 'polygon'.
        self.shape_type = shape_type

        self._highlightIndex = None
        self._highlightMode = self.NEAR_VERTEX
        # Per-mode (size multiplier, marker shape) for highlighted vertices.
        self._highlightSettings = {
            self.NEAR_VERTEX: (4, self.P_ROUND),
            self.MOVE_VERTEX: (1.5, self.P_SQUARE),
        }

        self._closed = False

        if line_color is not None:
            # Override the class line_color attribute
            # with an object attribute. Currently this
            # is used for drawing the pending line a different color.
            self.line_color = line_color

    @property
    def shape_type(self):
        """The kind of shape; one of polygon/rectangle/point/line/circle/linestrip."""
        return self._shape_type

    @shape_type.setter
    def shape_type(self, value):
        if value is None:
            value = 'polygon'
        if value not in ['polygon', 'rectangle', 'point',
                         'line', 'circle', 'linestrip']:
            raise ValueError('Unexpected shape_type: {}'.format(value))
        self._shape_type = value

    def close(self):
        """Mark the shape as closed (last vertex connects to the first)."""
        self._closed = True

    def addPoint(self, point):
        """Append a vertex; re-adding the first vertex closes the shape."""
        if self.points and point == self.points[0]:
            self.close()
        else:
            self.points.append(point)

    def popPoint(self):
        """Remove and return the last vertex, or None if there are none."""
        if self.points:
            return self.points.pop()
        return None

    def insertPoint(self, i, point):
        """Insert a vertex before index *i*."""
        self.points.insert(i, point)

    def isClosed(self):
        return self._closed

    def setOpen(self):
        self._closed = False

    def getRectFromLine(self, pt1, pt2):
        """Return the QRectF spanned by the two diagonal corners."""
        x1, y1 = pt1.x(), pt1.y()
        x2, y2 = pt2.x(), pt2.y()
        return QtCore.QRectF(x1, y1, x2 - x1, y2 - y1)

    def paint(self, painter):
        """Draw the shape outline, vertices, and (optionally) fill."""
        if self.points:
            color = self.select_line_color \
                if self.selected else self.line_color
            pen = QtGui.QPen(color)
            # Try using integer sizes for smoother drawing(?)
            pen.setWidth(max(1, int(round(2.0 / self.scale))))
            painter.setPen(pen)

            line_path = QtGui.QPainterPath()
            vrtx_path = QtGui.QPainterPath()

            if self.shape_type == 'rectangle':
                assert len(self.points) in [1, 2]
                if len(self.points) == 2:
                    rectangle = self.getRectFromLine(*self.points)
                    line_path.addRect(rectangle)
                for i in range(len(self.points)):
                    self.drawVertex(vrtx_path, i)
            elif self.shape_type == "circle":
                assert len(self.points) in [1, 2]
                if len(self.points) == 2:
                    rectangle = self.getCircleRectFromLine(self.points)
                    line_path.addEllipse(rectangle)
                for i in range(len(self.points)):
                    self.drawVertex(vrtx_path, i)
            elif self.shape_type == "linestrip":
                line_path.moveTo(self.points[0])
                for i, p in enumerate(self.points):
                    line_path.lineTo(p)
                    self.drawVertex(vrtx_path, i)
            else:
                line_path.moveTo(self.points[0])
                # Uncommenting the following line will draw 2 paths
                # for the 1st vertex, and make it non-filled, which
                # may be desirable.
                # self.drawVertex(vrtx_path, 0)

                for i, p in enumerate(self.points):
                    line_path.lineTo(p)
                    self.drawVertex(vrtx_path, i)
                if self.isClosed():
                    line_path.lineTo(self.points[0])

            painter.drawPath(line_path)
            painter.drawPath(vrtx_path)
            painter.fillPath(vrtx_path, self.vertex_fill_color)
            if self.fill:
                color = self.select_fill_color \
                    if self.selected else self.fill_color
                painter.fillPath(line_path, color)

    def drawVertex(self, path, i):
        """Add the marker for vertex *i* to *path*, honoring highlighting."""
        d = self.point_size / self.scale
        shape = self.point_type
        point = self.points[i]
        if i == self._highlightIndex:
            size, shape = self._highlightSettings[self._highlightMode]
            d *= size
        if self._highlightIndex is not None:
            self.vertex_fill_color = self.hvertex_fill_color
        else:
            self.vertex_fill_color = Shape.vertex_fill_color
        if shape == self.P_SQUARE:
            path.addRect(point.x() - d / 2, point.y() - d / 2, d, d)
        elif shape == self.P_ROUND:
            path.addEllipse(point, d / 2.0, d / 2.0)
        else:
            assert False, "unsupported vertex shape"

    def nearestVertex(self, point, epsilon):
        """Return the index of the closest vertex within *epsilon*, or None."""
        min_distance = float('inf')
        min_i = None
        for i, p in enumerate(self.points):
            dist = labelme.utils.distance(p - point)
            if dist <= epsilon and dist < min_distance:
                min_distance = dist
                min_i = i
        return min_i

    def nearestEdge(self, point, epsilon):
        """Return the index following the closest edge within *epsilon*, or None."""
        min_distance = float('inf')
        post_i = None
        for i in range(len(self.points)):
            line = [self.points[i - 1], self.points[i]]
            dist = labelme.utils.distancetoline(point, line)
            if dist <= epsilon and dist < min_distance:
                min_distance = dist
                post_i = i
        return post_i

    def containsPoint(self, point):
        """Return True if *point* lies inside the shape's painter path."""
        return self.makePath().contains(point)

    def getCircleRectFromLine(self, line):
        """Computes parameters to draw with `QPainterPath::addEllipse`.

        *line* is (center, perimeter point); the returned rect is the
        bounding square of the circle, or None if *line* is malformed.
        """
        if len(line) != 2:
            return None
        c, point = line
        r = c - point
        d = math.hypot(r.x(), r.y())  # radius
        rectangle = QtCore.QRectF(c.x() - d, c.y() - d, 2 * d, 2 * d)
        return rectangle

    def makePath(self):
        """Return a QPainterPath describing the shape's outline."""
        if self.shape_type == 'rectangle':
            path = QtGui.QPainterPath()
            if len(self.points) == 2:
                rectangle = self.getRectFromLine(*self.points)
                path.addRect(rectangle)
        elif self.shape_type == "circle":
            path = QtGui.QPainterPath()
            if len(self.points) == 2:
                rectangle = self.getCircleRectFromLine(self.points)
                path.addEllipse(rectangle)
        else:
            path = QtGui.QPainterPath(self.points[0])
            for p in self.points[1:]:
                path.lineTo(p)
        return path

    def boundingRect(self):
        return self.makePath().boundingRect()

    def moveBy(self, offset):
        """Translate every vertex by *offset*."""
        self.points = [p + offset for p in self.points]

    def moveVertexBy(self, i, offset):
        """Translate vertex *i* by *offset*."""
        self.points[i] = self.points[i] + offset

    def highlightVertex(self, i, action):
        self._highlightIndex = i
        self._highlightMode = action

    def highlightClear(self):
        self._highlightIndex = None

    def copy(self):
        """Return a deep-ish copy (points and colors duplicated)."""
        shape = Shape(label=self.label, shape_type=self.shape_type)
        shape.points = [copy.deepcopy(p) for p in self.points]
        shape.fill = self.fill
        shape.selected = self.selected
        shape._closed = self._closed
        shape.line_color = copy.deepcopy(self.line_color)
        shape.fill_color = copy.deepcopy(self.fill_color)
        return shape

    def __len__(self):
        return len(self.points)

    def __getitem__(self, key):
        return self.points[key]

    def __setitem__(self, key, value):
        self.points[key] = value
259 |
--------------------------------------------------------------------------------
/examples/semantic_segmentation/data_annotated/2011_000003.json:
--------------------------------------------------------------------------------
1 | {
2 | "shapes": [
3 | {
4 | "label": "person",
5 | "line_color": null,
6 | "fill_color": null,
7 | "points": [
8 | [
9 | 250.8142292490119,
10 | 107.33596837944665
11 | ],
12 | [
13 | 229.8142292490119,
14 | 119.33596837944665
15 | ],
16 | [
17 | 221.8142292490119,
18 | 135.33596837944665
19 | ],
20 | [
21 | 223.8142292490119,
22 | 148.33596837944665
23 | ],
24 | [
25 | 217.8142292490119,
26 | 161.33596837944665
27 | ],
28 | [
29 | 202.8142292490119,
30 | 168.33596837944665
31 | ],
32 | [
33 | 192.8142292490119,
34 | 200.33596837944665
35 | ],
36 | [
37 | 194.8142292490119,
38 | 222.33596837944665
39 | ],
40 | [
41 | 199.8142292490119,
42 | 227.33596837944665
43 | ],
44 | [
45 | 191.8142292490119,
46 | 234.33596837944665
47 | ],
48 | [
49 | 197.8142292490119,
50 | 264.3359683794467
51 | ],
52 | [
53 | 213.8142292490119,
54 | 295.3359683794467
55 | ],
56 | [
57 | 214.8142292490119,
58 | 320.3359683794467
59 | ],
60 | [
61 | 221.8142292490119,
62 | 327.3359683794467
63 | ],
64 | [
65 | 235.8142292490119,
66 | 326.3359683794467
67 | ],
68 | [
69 | 240.8142292490119,
70 | 323.3359683794467
71 | ],
72 | [
73 | 235.8142292490119,
74 | 298.3359683794467
75 | ],
76 | [
77 | 238.8142292490119,
78 | 287.3359683794467
79 | ],
80 | [
81 | 234.8142292490119,
82 | 268.3359683794467
83 | ],
84 | [
85 | 257.81422924901193,
86 | 258.3359683794467
87 | ],
88 | [
89 | 264.81422924901193,
90 | 264.3359683794467
91 | ],
92 | [
93 | 256.81422924901193,
94 | 273.3359683794467
95 | ],
96 | [
97 | 259.81422924901193,
98 | 282.3359683794467
99 | ],
100 | [
101 | 284.81422924901193,
102 | 288.3359683794467
103 | ],
104 | [
105 | 297.81422924901193,
106 | 278.3359683794467
107 | ],
108 | [
109 | 288.81422924901193,
110 | 270.3359683794467
111 | ],
112 | [
113 | 281.81422924901193,
114 | 270.3359683794467
115 | ],
116 | [
117 | 283.81422924901193,
118 | 264.3359683794467
119 | ],
120 | [
121 | 292.81422924901193,
122 | 261.3359683794467
123 | ],
124 | [
125 | 308.81422924901193,
126 | 236.33596837944665
127 | ],
128 | [
129 | 313.81422924901193,
130 | 217.33596837944665
131 | ],
132 | [
133 | 309.81422924901193,
134 | 208.33596837944665
135 | ],
136 | [
137 | 312.81422924901193,
138 | 202.33596837944665
139 | ],
140 | [
141 | 308.81422924901193,
142 | 185.33596837944665
143 | ],
144 | [
145 | 291.81422924901193,
146 | 173.33596837944665
147 | ],
148 | [
149 | 269.81422924901193,
150 | 159.33596837944665
151 | ],
152 | [
153 | 261.81422924901193,
154 | 154.33596837944665
155 | ],
156 | [
157 | 264.81422924901193,
158 | 142.33596837944665
159 | ],
160 | [
161 | 273.81422924901193,
162 | 137.33596837944665
163 | ],
164 | [
165 | 278.81422924901193,
166 | 130.33596837944665
167 | ],
168 | [
169 | 270.81422924901193,
170 | 121.33596837944665
171 | ]
172 | ]
173 | },
174 | {
175 | "label": "person",
176 | "line_color": null,
177 | "fill_color": null,
178 | "points": [
179 | [
180 | 482.81422924901193,
181 | 85.33596837944665
182 | ],
183 | [
184 | 468.81422924901193,
185 | 90.33596837944665
186 | ],
187 | [
188 | 460.81422924901193,
189 | 110.33596837944665
190 | ],
191 | [
192 | 460.81422924901193,
193 | 127.33596837944665
194 | ],
195 | [
196 | 444.81422924901193,
197 | 137.33596837944665
198 | ],
199 | [
200 | 419.81422924901193,
201 | 153.33596837944665
202 | ],
203 | [
204 | 410.81422924901193,
205 | 163.33596837944665
206 | ],
207 | [
208 | 403.81422924901193,
209 | 168.33596837944665
210 | ],
211 | [
212 | 394.81422924901193,
213 | 170.33596837944665
214 | ],
215 | [
216 | 386.81422924901193,
217 | 168.33596837944665
218 | ],
219 | [
220 | 386.81422924901193,
221 | 184.33596837944665
222 | ],
223 | [
224 | 392.81422924901193,
225 | 182.33596837944665
226 | ],
227 | [
228 | 410.81422924901193,
229 | 187.33596837944665
230 | ],
231 | [
232 | 414.81422924901193,
233 | 192.33596837944665
234 | ],
235 | [
236 | 437.81422924901193,
237 | 189.33596837944665
238 | ],
239 | [
240 | 434.81422924901193,
241 | 204.33596837944665
242 | ],
243 | [
244 | 390.81422924901193,
245 | 195.33596837944665
246 | ],
247 | [
248 | 386.81422924901193,
249 | 195.33596837944665
250 | ],
251 | [
252 | 387.81422924901193,
253 | 208.33596837944665
254 | ],
255 | [
256 | 381.81422924901193,
257 | 212.33596837944665
258 | ],
259 | [
260 | 372.81422924901193,
261 | 212.33596837944665
262 | ],
263 | [
264 | 372.81422924901193,
265 | 216.33596837944665
266 | ],
267 | [
268 | 400.81422924901193,
269 | 270.3359683794467
270 | ],
271 | [
272 | 389.81422924901193,
273 | 272.3359683794467
274 | ],
275 | [
276 | 389.81422924901193,
277 | 274.3359683794467
278 | ],
279 | [
280 | 403.81422924901193,
281 | 282.3359683794467
282 | ],
283 | [
284 | 444.81422924901193,
285 | 283.3359683794467
286 | ],
287 | [
288 | 443.81422924901193,
289 | 259.3359683794467
290 | ],
291 | [
292 | 426.81422924901193,
293 | 244.33596837944665
294 | ],
295 | [
296 | 462.81422924901193,
297 | 256.3359683794467
298 | ],
299 | [
300 | 474.81422924901193,
301 | 270.3359683794467
302 | ],
303 | [
304 | 477.81422924901193,
305 | 280.3359683794467
306 | ],
307 | [
308 | 473.81422924901193,
309 | 289.3359683794467
310 | ],
311 | [
312 | 471.81422924901193,
313 | 296.3359683794467
314 | ],
315 | [
316 | 472.81422924901193,
317 | 317.3359683794467
318 | ],
319 | [
320 | 480.81422924901193,
321 | 332.3359683794467
322 | ],
323 | [
324 | 494.81422924901193,
325 | 335.3359683794467
326 | ],
327 | [
328 | 498.81422924901193,
329 | 329.3359683794467
330 | ],
331 | [
332 | 494.81422924901193,
333 | 308.3359683794467
334 | ],
335 | [
336 | 499.81422924901193,
337 | 297.3359683794467
338 | ],
339 | [
340 | 499.81422924901193,
341 | 90.33596837944665
342 | ]
343 | ]
344 | },
345 | {
346 | "label": "bottle",
347 | "line_color": null,
348 | "fill_color": null,
349 | "points": [
350 | [
351 | 374.81422924901193,
352 | 159.33596837944665
353 | ],
354 | [
355 | 369.81422924901193,
356 | 170.33596837944665
357 | ],
358 | [
359 | 369.81422924901193,
360 | 210.33596837944665
361 | ],
362 | [
363 | 375.81422924901193,
364 | 212.33596837944665
365 | ],
366 | [
367 | 387.81422924901193,
368 | 209.33596837944665
369 | ],
370 | [
371 | 385.81422924901193,
372 | 185.33596837944665
373 | ],
374 | [
375 | 385.81422924901193,
376 | 168.33596837944665
377 | ],
378 | [
379 | 385.81422924901193,
380 | 165.33596837944665
381 | ],
382 | [
383 | 382.81422924901193,
384 | 159.33596837944665
385 | ]
386 | ]
387 | },
388 | {
389 | "label": "person",
390 | "line_color": null,
391 | "fill_color": null,
392 | "points": [
393 | [
394 | 370.81422924901193,
395 | 170.33596837944665
396 | ],
397 | [
398 | 366.81422924901193,
399 | 173.33596837944665
400 | ],
401 | [
402 | 365.81422924901193,
403 | 182.33596837944665
404 | ],
405 | [
406 | 368.81422924901193,
407 | 185.33596837944665
408 | ]
409 | ]
410 | },
411 | {
412 | "label": "__ignore__",
413 | "line_color": null,
414 | "fill_color": null,
415 | "points": [
416 | [
417 | 338.81422924901193,
418 | 266.3359683794467
419 | ],
420 | [
421 | 313.81422924901193,
422 | 269.3359683794467
423 | ],
424 | [
425 | 297.81422924901193,
426 | 277.3359683794467
427 | ],
428 | [
429 | 282.81422924901193,
430 | 288.3359683794467
431 | ],
432 | [
433 | 273.81422924901193,
434 | 302.3359683794467
435 | ],
436 | [
437 | 272.81422924901193,
438 | 320.3359683794467
439 | ],
440 | [
441 | 279.81422924901193,
442 | 337.3359683794467
443 | ],
444 | [
445 | 428.81422924901193,
446 | 337.3359683794467
447 | ],
448 | [
449 | 432.81422924901193,
450 | 316.3359683794467
451 | ],
452 | [
453 | 423.81422924901193,
454 | 296.3359683794467
455 | ],
456 | [
457 | 403.81422924901193,
458 | 283.3359683794467
459 | ],
460 | [
461 | 370.81422924901193,
462 | 270.3359683794467
463 | ]
464 | ]
465 | }
466 | ],
467 | "lineColor": [
468 | 0,
469 | 255,
470 | 0,
471 | 128
472 | ],
473 | "fillColor": [
474 | 255,
475 | 0,
476 | 0,
477 | 128
478 | ],
479 | "imagePath": "2011_000003.jpg",
480 | "imageData": null
481 | }
--------------------------------------------------------------------------------
/examples/instance_segmentation/data_annotated/2011_000003.json:
--------------------------------------------------------------------------------
1 | {
2 | "imagePath": "2011_000003.jpg",
3 | "shapes": [
4 | {
5 | "line_color": null,
6 | "points": [
7 | [
8 | 251.8142292490119,
9 | 107.33596837944665
10 | ],
11 | [
12 | 230.8142292490119,
13 | 119.33596837944665
14 | ],
15 | [
16 | 222.8142292490119,
17 | 135.33596837944665
18 | ],
19 | [
20 | 224.8142292490119,
21 | 148.33596837944665
22 | ],
23 | [
24 | 218.8142292490119,
25 | 161.33596837944665
26 | ],
27 | [
28 | 203.8142292490119,
29 | 168.33596837944665
30 | ],
31 | [
32 | 193.8142292490119,
33 | 200.33596837944665
34 | ],
35 | [
36 | 195.8142292490119,
37 | 222.33596837944665
38 | ],
39 | [
40 | 200.8142292490119,
41 | 227.33596837944665
42 | ],
43 | [
44 | 192.8142292490119,
45 | 234.33596837944665
46 | ],
47 | [
48 | 198.8142292490119,
49 | 264.3359683794467
50 | ],
51 | [
52 | 214.8142292490119,
53 | 295.3359683794467
54 | ],
55 | [
56 | 215.8142292490119,
57 | 320.3359683794467
58 | ],
59 | [
60 | 222.8142292490119,
61 | 327.3359683794467
62 | ],
63 | [
64 | 236.8142292490119,
65 | 326.3359683794467
66 | ],
67 | [
68 | 241.8142292490119,
69 | 323.3359683794467
70 | ],
71 | [
72 | 236.8142292490119,
73 | 298.3359683794467
74 | ],
75 | [
76 | 239.8142292490119,
77 | 287.3359683794467
78 | ],
79 | [
80 | 235.8142292490119,
81 | 268.3359683794467
82 | ],
83 | [
84 | 258.81422924901193,
85 | 258.3359683794467
86 | ],
87 | [
88 | 265.81422924901193,
89 | 264.3359683794467
90 | ],
91 | [
92 | 257.81422924901193,
93 | 273.3359683794467
94 | ],
95 | [
96 | 260.81422924901193,
97 | 282.3359683794467
98 | ],
99 | [
100 | 285.81422924901193,
101 | 288.3359683794467
102 | ],
103 | [
104 | 298.81422924901193,
105 | 278.3359683794467
106 | ],
107 | [
108 | 289.81422924901193,
109 | 270.3359683794467
110 | ],
111 | [
112 | 282.81422924901193,
113 | 270.3359683794467
114 | ],
115 | [
116 | 284.81422924901193,
117 | 264.3359683794467
118 | ],
119 | [
120 | 293.81422924901193,
121 | 261.3359683794467
122 | ],
123 | [
124 | 309.81422924901193,
125 | 236.33596837944665
126 | ],
127 | [
128 | 314.81422924901193,
129 | 217.33596837944665
130 | ],
131 | [
132 | 310.81422924901193,
133 | 208.33596837944665
134 | ],
135 | [
136 | 313.81422924901193,
137 | 202.33596837944665
138 | ],
139 | [
140 | 309.81422924901193,
141 | 185.33596837944665
142 | ],
143 | [
144 | 292.81422924901193,
145 | 173.33596837944665
146 | ],
147 | [
148 | 270.81422924901193,
149 | 159.33596837944665
150 | ],
151 | [
152 | 262.81422924901193,
153 | 154.33596837944665
154 | ],
155 | [
156 | 265.81422924901193,
157 | 142.33596837944665
158 | ],
159 | [
160 | 274.81422924901193,
161 | 137.33596837944665
162 | ],
163 | [
164 | 279.81422924901193,
165 | 130.33596837944665
166 | ],
167 | [
168 | 271.81422924901193,
169 | 121.33596837944665
170 | ]
171 | ],
172 | "fill_color": null,
173 | "label": "person-1"
174 | },
175 | {
176 | "line_color": null,
177 | "points": [
178 | [
179 | 482.81422924901193,
180 | 85.33596837944665
181 | ],
182 | [
183 | 468.81422924901193,
184 | 90.33596837944665
185 | ],
186 | [
187 | 460.81422924901193,
188 | 110.33596837944665
189 | ],
190 | [
191 | 460.81422924901193,
192 | 127.33596837944665
193 | ],
194 | [
195 | 444.81422924901193,
196 | 137.33596837944665
197 | ],
198 | [
199 | 419.81422924901193,
200 | 153.33596837944665
201 | ],
202 | [
203 | 410.81422924901193,
204 | 163.33596837944665
205 | ],
206 | [
207 | 403.81422924901193,
208 | 168.33596837944665
209 | ],
210 | [
211 | 394.81422924901193,
212 | 170.33596837944665
213 | ],
214 | [
215 | 386.81422924901193,
216 | 168.33596837944665
217 | ],
218 | [
219 | 386.81422924901193,
220 | 184.33596837944665
221 | ],
222 | [
223 | 392.81422924901193,
224 | 182.33596837944665
225 | ],
226 | [
227 | 410.81422924901193,
228 | 187.33596837944665
229 | ],
230 | [
231 | 414.81422924901193,
232 | 192.33596837944665
233 | ],
234 | [
235 | 437.81422924901193,
236 | 189.33596837944665
237 | ],
238 | [
239 | 434.81422924901193,
240 | 204.33596837944665
241 | ],
242 | [
243 | 390.81422924901193,
244 | 195.33596837944665
245 | ],
246 | [
247 | 386.81422924901193,
248 | 195.33596837944665
249 | ],
250 | [
251 | 387.81422924901193,
252 | 208.33596837944665
253 | ],
254 | [
255 | 381.81422924901193,
256 | 212.33596837944665
257 | ],
258 | [
259 | 372.81422924901193,
260 | 212.33596837944665
261 | ],
262 | [
263 | 372.81422924901193,
264 | 216.33596837944665
265 | ],
266 | [
267 | 400.81422924901193,
268 | 270.3359683794467
269 | ],
270 | [
271 | 389.81422924901193,
272 | 272.3359683794467
273 | ],
274 | [
275 | 389.81422924901193,
276 | 274.3359683794467
277 | ],
278 | [
279 | 403.81422924901193,
280 | 282.3359683794467
281 | ],
282 | [
283 | 444.81422924901193,
284 | 283.3359683794467
285 | ],
286 | [
287 | 443.81422924901193,
288 | 259.3359683794467
289 | ],
290 | [
291 | 426.81422924901193,
292 | 244.33596837944665
293 | ],
294 | [
295 | 462.81422924901193,
296 | 256.3359683794467
297 | ],
298 | [
299 | 474.81422924901193,
300 | 270.3359683794467
301 | ],
302 | [
303 | 477.81422924901193,
304 | 280.3359683794467
305 | ],
306 | [
307 | 473.81422924901193,
308 | 289.3359683794467
309 | ],
310 | [
311 | 471.81422924901193,
312 | 296.3359683794467
313 | ],
314 | [
315 | 472.81422924901193,
316 | 317.3359683794467
317 | ],
318 | [
319 | 480.81422924901193,
320 | 332.3359683794467
321 | ],
322 | [
323 | 494.81422924901193,
324 | 335.3359683794467
325 | ],
326 | [
327 | 498.81422924901193,
328 | 329.3359683794467
329 | ],
330 | [
331 | 494.81422924901193,
332 | 308.3359683794467
333 | ],
334 | [
335 | 499.81422924901193,
336 | 297.3359683794467
337 | ],
338 | [
339 | 499.81422924901193,
340 | 90.33596837944665
341 | ]
342 | ],
343 | "fill_color": null,
344 | "label": "person-2"
345 | },
346 | {
347 | "line_color": null,
348 | "points": [
349 | [
350 | 374.81422924901193,
351 | 159.33596837944665
352 | ],
353 | [
354 | 369.81422924901193,
355 | 170.33596837944665
356 | ],
357 | [
358 | 369.81422924901193,
359 | 210.33596837944665
360 | ],
361 | [
362 | 375.81422924901193,
363 | 212.33596837944665
364 | ],
365 | [
366 | 387.81422924901193,
367 | 209.33596837944665
368 | ],
369 | [
370 | 385.81422924901193,
371 | 185.33596837944665
372 | ],
373 | [
374 | 385.81422924901193,
375 | 168.33596837944665
376 | ],
377 | [
378 | 385.81422924901193,
379 | 165.33596837944665
380 | ],
381 | [
382 | 382.81422924901193,
383 | 159.33596837944665
384 | ]
385 | ],
386 | "fill_color": null,
387 | "label": "bottle"
388 | },
389 | {
390 | "line_color": null,
391 | "points": [
392 | [
393 | 370.81422924901193,
394 | 170.33596837944665
395 | ],
396 | [
397 | 366.81422924901193,
398 | 173.33596837944665
399 | ],
400 | [
401 | 365.81422924901193,
402 | 182.33596837944665
403 | ],
404 | [
405 | 368.81422924901193,
406 | 185.33596837944665
407 | ]
408 | ],
409 | "fill_color": null,
410 | "label": "person-2"
411 | },
412 | {
413 | "line_color": null,
414 | "points": [
415 | [
416 | 338.81422924901193,
417 | 266.3359683794467
418 | ],
419 | [
420 | 313.81422924901193,
421 | 269.3359683794467
422 | ],
423 | [
424 | 297.81422924901193,
425 | 277.3359683794467
426 | ],
427 | [
428 | 282.81422924901193,
429 | 288.3359683794467
430 | ],
431 | [
432 | 273.81422924901193,
433 | 302.3359683794467
434 | ],
435 | [
436 | 272.81422924901193,
437 | 320.3359683794467
438 | ],
439 | [
440 | 279.81422924901193,
441 | 337.3359683794467
442 | ],
443 | [
444 | 428.81422924901193,
445 | 337.3359683794467
446 | ],
447 | [
448 | 432.81422924901193,
449 | 316.3359683794467
450 | ],
451 | [
452 | 423.81422924901193,
453 | 296.3359683794467
454 | ],
455 | [
456 | 403.81422924901193,
457 | 283.3359683794467
458 | ],
459 | [
460 | 370.81422924901193,
461 | 270.3359683794467
462 | ]
463 | ],
464 | "fill_color": null,
465 | "label": "__ignore__"
466 | }
467 | ],
468 | "imageData": null,
469 | "lineColor": [
470 | 0,
471 | 255,
472 | 0,
473 | 128
474 | ],
475 | "fillColor": [
476 | 255,
477 | 0,
478 | 0,
479 | 128
480 | ]
481 | }
--------------------------------------------------------------------------------