├── alfred
├── io
│ ├── __init__.py
│ └── h5_wrapper.py
├── torch
│ ├── __init__.py
│ └── common.py
├── modules
│ ├── data
│ │ ├── coco2voc.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── view_coco.cpython-36.pyc
│ │ │ ├── view_voc.cpython-36.pyc
│ │ │ ├── voc2coco.cpython-36.pyc
│ │ │ └── gather_voclabels.cpython-36.pyc
│ │ ├── eval_coco.py
│ │ ├── __init__.py
│ │ ├── labelone_view.py
│ │ ├── split_voc.py
│ │ ├── voc2yolo.py
│ │ ├── gather_voclabels.py
│ │ ├── split_coco.py
│ │ ├── coco2yolo.py
│ │ ├── view_yolo.py
│ │ ├── convert_csv2voc.py
│ │ ├── txt2voc.py
│ │ └── view_txt.py
│ ├── dltool
│ │ └── __init__.py
│ ├── cabinet
│ │ ├── mdparse
│ │ │ ├── __init__.py
│ │ │ ├── formatters
│ │ │ │ ├── __init__.py
│ │ │ │ ├── simple.py
│ │ │ │ ├── html.py
│ │ │ │ └── pdf.py
│ │ │ ├── transformers
│ │ │ │ ├── __init__.py
│ │ │ │ ├── md
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── transformer.py
│ │ │ │ └── html
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── transformer.py
│ │ │ ├── string_tools.py
│ │ │ └── www_tools.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── license.cpython-36.pyc
│ │ │ ├── split_txt.cpython-36.pyc
│ │ │ └── count_file.cpython-36.pyc
│ │ ├── templates
│ │ │ ├── bsd-3.tmpl
│ │ │ ├── gpl-v3.tmpl
│ │ │ ├── agpl-v3.tmpl
│ │ │ ├── lgpl-v3.tmpl
│ │ │ ├── lgpl-v2.1.tmpl
│ │ │ ├── apache-2.tmpl
│ │ │ ├── cecill.tmpl
│ │ │ ├── cecill-B.tmpl
│ │ │ └── cecill-C.tmpl
│ │ ├── __init__.py
│ │ ├── changesource.py
│ │ ├── count_file.py
│ │ ├── face_crop.py
│ │ ├── stack_imgs.py
│ │ ├── webcam.py
│ │ └── split_txt.py
│ ├── scrap
│ │ ├── readme.md
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-35.pyc
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── image_scraper.cpython-35.pyc
│ │ │ └── image_scraper.cpython-36.pyc
│ │ └── __init__.py
│ ├── text
│ │ ├── readme.md
│ │ └── __init__.py
│ ├── vision
│ │ ├── readme.md
│ │ ├── __pycache__
│ │ │ ├── vis_kit.cpython-36.pyc
│ │ │ ├── __init__.cpython-35.pyc
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── to_video.cpython-35.pyc
│ │ │ ├── to_video.cpython-36.pyc
│ │ │ ├── video_extractor.cpython-35.pyc
│ │ │ ├── video_extractor.cpython-36.pyc
│ │ │ └── video_reducer.cpython-36.pyc
│ │ ├── __init__.py
│ │ ├── to_video.py
│ │ ├── video_extractor.py
│ │ ├── video_reducer.py
│ │ └── combine_img_column.py
│ ├── __pycache__
│ │ ├── __init__.cpython-35.pyc
│ │ └── __init__.cpython-36.pyc
│ └── __init__.py
├── deploy
│ └── tensorrt
│ │ ├── __init__.py
│ │ ├── calibrator.py
│ │ └── process.py
├── deprecated
│ └── dl
│ │ ├── data
│ │ │ ├── meta
│ │ │ │ ├── __init__.py
│ │ │ │ ├── concatenated_dataset.py
│ │ │ │ ├── dataset_mixin.py
│ │ │ │ └── getter_dataset.py
│ │ │ └── common
│ │ │ │ └── __init__.py
│ │ ├── evaluator
│ │ │ └── __init__.py
│ │ ├── metrics
│ │ │ └── __init__.py
│ │ ├── __init__.py
│ │ ├── torch
│ │ │ ├── __init__.py
│ │ │ ├── ops
│ │ │ │ ├── __init__.py
│ │ │ │ └── array_ops.py
│ │ │ ├── nn
│ │ │ │ ├── modules
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── normalization.py
│ │ │ │ ├── __init__.py
│ │ │ │ ├── functional.py
│ │ │ │ └── weights_init.py
│ │ │ ├── train
│ │ │ │ ├── __init__.py
│ │ │ │ └── common.py
│ │ │ ├── gpu.py
│ │ │ ├── common.py
│ │ │ ├── env.py
│ │ │ └── tools.py
│ │ ├── inference
│ │ │ └── __init__.py
│ │ └── tf
│ │ │ └── common.py
├── fonts
│ └── FZSSJW.TTF
├── fusion
│ ├── __pycache__
│ │ ├── __init__.cpython-36.pyc
│ │ ├── common.cpython-36.pyc
│ │ └── kitti_fusion.cpython-36.pyc
│ ├── __init__.py
│ └── geometry.py
├── vis
│ ├── pointcloud
│ │ ├── __pycache__
│ │ │ └── draw3dbox.cpython-36.pyc
│ │ └── __init__.py
│ ├── mesh3d
│ │ ├── assets
│ │ │ ├── sphere_vertices_2.txt
│ │ │ ├── sphere_faces_2.txt
│ │ │ ├── sphere_vertices_4.txt
│ │ │ ├── sphere_faces_4.txt
│ │ │ └── sphere_vertices_8.txt
│ │ └── default_viscfg.yml
│ ├── image
│ │ ├── process.py
│ │ ├── __init__.py
│ │ ├── face.py
│ │ ├── seg.py
│ │ └── pose_datasets
│ │ │ ├── onehand10k.py
│ │ │ └── interhand2d.py
│ └── __init__.py
├── dl
│ └── __init__.py
├── protos
│ └── labelmap.proto
├── utils
│ ├── pprint.py
│ ├── image_convertor.py
│ ├── cv_wrapper.py
│ ├── __init__.py
│ ├── math_utils.py
│ ├── base_config.py
│ ├── progress.py
│ ├── communicate.py
│ ├── mana.py
│ └── log.py
├── version.py
├── siren
│ └── topicgen.py
├── __init__.py
├── tests.py
└── tests
│ └── cv_box_fancy.py
├── bumpversion.bat
├── examples
│ ├── .gitignore
│ ├── result.png
│ ├── data
│ │ ├── X3D.npy
│ │ ├── 000000.png
│ │ ├── 000011.bin
│ │ ├── 000011.png
│ │ ├── 0000000002.png
│ │ ├── 0000000172.bin
│ │ ├── 0000000172.png
│ │ └── 000011.txt
│ ├── README.md
│ ├── demo_show_mesh.py
│ ├── demo_o3d_server.py
│ ├── chatbot.py
│ ├── projection_cal.py
│ ├── vis_coco.py
│ ├── demo_o3d.py
│ ├── alfred_show_box_gt.py
│ ├── demo_p3d.py
│ ├── pykitti_test.py
│ └── draw_3d_pointcloud.py
├── .DS_Store
├── ps.sh
├── upload_pypi.bat
├── MANIFEST.in
├── .gitignore
├── PKG-INFO
├── setup.cfg
└── upload_pypi.sh
/alfred/io/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/alfred/torch/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/alfred/modules/data/coco2voc.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/alfred/modules/dltool/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/alfred/deploy/tensorrt/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/bumpversion.bat:
--------------------------------------------------------------------------------
1 | bumpver update -p -n
--------------------------------------------------------------------------------
/alfred/deprecated/dl/data/meta/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/evaluator/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/metrics/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/mdparse/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/data/common/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/.gitignore:
--------------------------------------------------------------------------------
1 | cow.obj
2 | cow_mesh/
3 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/mdparse/formatters/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/mdparse/transformers/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/mdparse/transformers/md/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/alfred/modules/scrap/readme.md:
--------------------------------------------------------------------------------
1 | scrap module goes here
2 |
--------------------------------------------------------------------------------
/alfred/modules/text/readme.md:
--------------------------------------------------------------------------------
1 | text module goes here.
2 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/mdparse/transformers/html/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/alfred/modules/vision/readme.md:
--------------------------------------------------------------------------------
1 | vision module goes here
2 |
--------------------------------------------------------------------------------
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/.DS_Store
--------------------------------------------------------------------------------
/examples/result.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/examples/result.png
--------------------------------------------------------------------------------
/examples/data/X3D.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/examples/data/X3D.npy
--------------------------------------------------------------------------------
/alfred/fonts/FZSSJW.TTF:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/fonts/FZSSJW.TTF
--------------------------------------------------------------------------------
/examples/data/000000.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/examples/data/000000.png
--------------------------------------------------------------------------------
/examples/data/000011.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/examples/data/000011.bin
--------------------------------------------------------------------------------
/examples/data/000011.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/examples/data/000011.png
--------------------------------------------------------------------------------
/examples/data/0000000002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/examples/data/0000000002.png
--------------------------------------------------------------------------------
/examples/data/0000000172.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/examples/data/0000000172.bin
--------------------------------------------------------------------------------
/examples/data/0000000172.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/examples/data/0000000172.png
--------------------------------------------------------------------------------
/ps.sh:
--------------------------------------------------------------------------------
1 | # autopep8 -r ./minigemini/ -i
2 |
3 | git add .
4 | git commit -am 'add'
5 | git push origin main
6 |
--------------------------------------------------------------------------------
/alfred/torch/common.py:
--------------------------------------------------------------------------------
1 | # import torch
2 |
3 | # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
4 | device = 'cuda'
5 |
--------------------------------------------------------------------------------
/alfred/fusion/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/fusion/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/fusion/__pycache__/common.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/fusion/__pycache__/common.cpython-36.pyc
--------------------------------------------------------------------------------
/upload_pypi.bat:
--------------------------------------------------------------------------------
1 | python3 setup.py check
2 |
3 |
4 | rmdir /s /q build
5 | rmdir /s /q dist
6 |
7 | python setup.py sdist
8 | twine upload dist/*
9 |
10 |
--------------------------------------------------------------------------------
/alfred/modules/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/alfred/modules/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/fusion/__pycache__/kitti_fusion.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/fusion/__pycache__/kitti_fusion.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/modules/data/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/data/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/modules/data/__pycache__/view_coco.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/data/__pycache__/view_coco.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/modules/data/__pycache__/view_voc.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/data/__pycache__/view_voc.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/modules/data/__pycache__/voc2coco.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/data/__pycache__/voc2coco.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/modules/scrap/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/scrap/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/alfred/modules/scrap/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/scrap/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/modules/vision/__pycache__/vis_kit.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/vision/__pycache__/vis_kit.cpython-36.pyc
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | Examples provide some demos for testing 3D visualization and point cloud visualization.
2 |
3 | - `demo_p3d.py`: visualize a model with PyTorch3D;
--------------------------------------------------------------------------------
/alfred/modules/cabinet/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/cabinet/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/modules/cabinet/__pycache__/license.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/cabinet/__pycache__/license.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/modules/cabinet/__pycache__/split_txt.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/cabinet/__pycache__/split_txt.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/modules/vision/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/vision/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/alfred/modules/vision/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/vision/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/modules/vision/__pycache__/to_video.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/vision/__pycache__/to_video.cpython-35.pyc
--------------------------------------------------------------------------------
/alfred/modules/vision/__pycache__/to_video.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/vision/__pycache__/to_video.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/vis/pointcloud/__pycache__/draw3dbox.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/vis/pointcloud/__pycache__/draw3dbox.cpython-36.pyc
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include README.md
3 | include alfred/modules/cabinet/templates/*.tmpl
4 | include alfred/fonts/FZSSJW.TTF
5 | include alfred/vis/mesh3d/assets/*
--------------------------------------------------------------------------------
/alfred/modules/cabinet/__pycache__/count_file.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/cabinet/__pycache__/count_file.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/modules/scrap/__pycache__/image_scraper.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/scrap/__pycache__/image_scraper.cpython-35.pyc
--------------------------------------------------------------------------------
/alfred/modules/scrap/__pycache__/image_scraper.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/scrap/__pycache__/image_scraper.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/modules/data/__pycache__/gather_voclabels.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/data/__pycache__/gather_voclabels.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/modules/vision/__pycache__/video_extractor.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/vision/__pycache__/video_extractor.cpython-35.pyc
--------------------------------------------------------------------------------
/alfred/modules/vision/__pycache__/video_extractor.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/vision/__pycache__/video_extractor.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/modules/vision/__pycache__/video_reducer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucasjinreal/alfred/HEAD/alfred/modules/vision/__pycache__/video_reducer.cpython-36.pyc
--------------------------------------------------------------------------------
/alfred/dl/__init__.py:
--------------------------------------------------------------------------------
1 | # This API has been deprecated: `device` is hardcoded and has no effect.
2 | # Importing torch here made `import alfred` too slow on Windows,
3 | # so the dynamic device selection was removed.
4 | device = "cuda"
5 |
--------------------------------------------------------------------------------
/alfred/vis/mesh3d/assets/sphere_vertices_2.txt:
--------------------------------------------------------------------------------
1 | 0.000 0.000 1.000
2 | 0.000 0.000 -1.000
3 | 1.000 0.000 0.000
4 | 0.000 1.000 0.000
5 | -1.000 0.000 0.000
6 | -0.000 -1.000 0.000
7 |
--------------------------------------------------------------------------------
/alfred/modules/data/eval_coco.py:
--------------------------------------------------------------------------------
1 | """
2 |
3 | Pass two JSON files:
4 | instances_gt.json and instances_generated.json.
5 |
6 | This will calculate the COCO mAP
7 | and output the final result.
8 |
9 | """
10 |
--------------------------------------------------------------------------------
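The docstring above describes computing COCO mAP from a ground-truth file and a generated results file. A minimal sketch of how such an evaluation is typically done with pycocotools (illustrative only; the file names come from the docstring, everything else is an assumption about the intended workflow):

    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval

    # Load ground-truth annotations and the generated detections.
    coco_gt = COCO("instances_gt.json")
    coco_dt = coco_gt.loadRes("instances_generated.json")

    # Standard bbox evaluation: evaluate, accumulate, then print the mAP table.
    coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
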
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | build/
3 | alfred_py.egg-info/
4 | alfred.egg-info/
5 | dist/
6 | build/
7 | .vscode/
8 | vendor/
9 |
10 | *.pyc
11 | a.py
12 | __pycache__/vendor/
13 | upload_tpi.sh
14 | __pycache__/
15 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/templates/bsd-3.tmpl:
--------------------------------------------------------------------------------
1 | Copyright (c) ${years} ${owner}.
2 |
3 | This file is part of ${projectname}
4 | (see ${projecturl}).
5 |
6 | License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause
7 |
--------------------------------------------------------------------------------
/alfred/vis/mesh3d/assets/sphere_faces_2.txt:
--------------------------------------------------------------------------------
1 | 0 2 3
2 | 1 3 2
3 | 0 3 4
4 | 1 4 3
5 | 0 4 5
6 | 1 5 4
7 | 0 5 2
8 | 1 2 5
9 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/mdparse/formatters/simple.py:
--------------------------------------------------------------------------------
1 | """
2 | Simple formatter.
3 | """
4 |
5 |
6 | class SimpleFormatter:
7 | """
8 | Writes lines, "as is".
9 | """
10 |
11 | format = "md"
12 |
13 | @staticmethod
14 | def write(lines):
15 | return "".join(lines).encode("utf8")
16 |
--------------------------------------------------------------------------------
/examples/demo_show_mesh.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import open3d as o3d
3 | from alfred import print_shape
4 | import numpy as np
5 |
6 |
7 | a = sys.argv[1]
8 |
9 | mesh = o3d.io.read_triangle_mesh(a, print_progress=True)
10 | mesh.compute_vertex_normals()
11 |
12 | print(np.asarray(mesh.vertices).shape)
13 | print(np.asarray(mesh.triangles).shape)
14 |
15 | o3d.visualization.draw_geometries([mesh])
16 |
--------------------------------------------------------------------------------
/alfred/protos/labelmap.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | message LabelMapItem {
4 | // Both name and label are required.
5 | optional string name = 1;
6 | optional int32 label = 2;
7 | optional int32 id = 4;
8 | // display_name is optional.
9 | optional string display_name = 3;
10 | }
11 |
12 | message LabelMap {
13 | repeated LabelMapItem item = 1;
14 | }
15 |
--------------------------------------------------------------------------------
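For reference, a label map written in the text format of the LabelMap message above can be parsed from Python. This is a sketch that assumes the schema has been compiled with protoc --python_out, yielding a hypothetical labelmap_pb2 module; the item entries are illustrative:

    from google.protobuf import text_format
    import labelmap_pb2  # assumed: generated from labelmap.proto via protoc

    LABELMAP_TXT = """
    item {
      name: "person"
      label: 1
      display_name: "person"
    }
    item {
      name: "car"
      label: 2
      display_name: "car"
    }
    """

    # Parse the text proto and build a simple name -> label lookup.
    label_map = text_format.Parse(LABELMAP_TXT, labelmap_pb2.LabelMap())
    name_to_label = {it.name: it.label for it in label_map.item}
    print(name_to_label)  # {'person': 1, 'car': 2}
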
/PKG-INFO:
--------------------------------------------------------------------------------
1 | Metadata-Version: 1.0
2 | Name: alfred-py
3 | Version: 1.0.6
4 | Summary: alfred is a deep learning scripts collection, built with many ready-to-use functions for processing images and text
5 | Home-page: https://github.com/jinfagang/alfred
6 | Author: Lucas Jin
7 | Author-email: jinfagang10@163.com
8 | License: GPL
9 | Description: UNKNOWN
10 | Keywords: deep learning,script helper,tools
11 | Platform: any
12 |
--------------------------------------------------------------------------------
/alfred/vis/image/process.py:
--------------------------------------------------------------------------------
1 | """
2 |
3 | Image processing helpers.
4 |
5 | """
6 |
7 |
8 | import cv2
9 |
10 |
11 | def darken_image(ori_img, dark_factor=0.6):
12 | """
13 | Darken the original image and return the darkened copy.
14 | """
15 | hsv_img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2HSV)
16 | hsv_img[..., 2] = hsv_img[..., 2] * dark_factor
17 | originimg = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR)
18 | return originimg
19 |
--------------------------------------------------------------------------------
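A quick usage sketch for darken_image (the image path is illustrative):

    import cv2
    from alfred.vis.image.process import darken_image

    img = cv2.imread("demo.jpg")
    dark = darken_image(img, dark_factor=0.5)  # scale the V channel in HSV space
    cv2.imwrite("demo_dark.jpg", dark)
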
/alfred/modules/cabinet/mdparse/formatters/html.py:
--------------------------------------------------------------------------------
1 | """
2 | HTML formatter.
3 | """
4 |
5 | from markdown import markdown
6 |
7 |
8 | class HTMLFormatter:
9 | """
10 | Convert lines into HTML.
11 | """
12 |
13 | format = "html"
14 |
15 | @staticmethod
16 | def write(lines):
17 | md = markdown("".join(lines), output_format="html")
18 | return f"\n
\n\n{md}\n\n".encode()
19 |
--------------------------------------------------------------------------------
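A small usage sketch showing how these formatter classes can be driven; the input lines are illustrative:

    from alfred.modules.cabinet.mdparse.formatters.simple import SimpleFormatter
    from alfred.modules.cabinet.mdparse.formatters.html import HTMLFormatter

    lines = ["# Notes\n", "\n", "Some **markdown** content.\n"]

    # SimpleFormatter writes the markdown as-is; HTMLFormatter renders it
    # through python-markdown first. Both return utf-8 encoded bytes.
    with open("notes.md", "wb") as f:
        f.write(SimpleFormatter.write(lines))
    with open("notes.html", "wb") as f:
        f.write(HTMLFormatter.write(lines))
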
/setup.cfg:
--------------------------------------------------------------------------------
1 | [egg_info]
2 | tag_build =
3 | tag_date = 0
4 |
5 |
6 | [bumpver]
7 | current_version = "3.0.8"
8 | version_pattern = "MAJOR.MINOR.PATCH"
9 | commit_message = "bump version {old_version} -> {new_version}"
10 | commit = True
11 | tag = True
12 | push = True
13 |
14 | [bumpver:file_patterns]
15 | setup.cfg =
16 | current_version = "{version}"
17 | alfred/version.py =
18 | __version__ = "{version}"
19 | setup.py =
20 | "{version}"
21 | "{pep440_version}"
22 | README.md =
23 | {version}
24 | {pep440_version}
25 |
26 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/mdparse/string_tools.py:
--------------------------------------------------------------------------------
1 | """
2 | Routines for the strings.
3 | """
4 |
5 | import re
6 | import unicodedata
7 |
8 |
9 | def slugify(value):
10 | """
11 | Normalizes string, converts to lowercase, removes non-alpha characters,
12 | and converts spaces to hyphens.
13 | """
14 |
15 | value = unicodedata.normalize("NFKD", value).encode("ascii", "ignore")
16 | value = re.sub(r"[^\w\s-]", "", value.decode()).strip().lower()
17 | value = re.sub(r"[-\s]+", "-", value)
18 |
19 | return value
20 |
--------------------------------------------------------------------------------
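A couple of illustrative calls to slugify:

    from alfred.modules.cabinet.mdparse.string_tools import slugify

    print(slugify("Hello, Wörld!"))   # -> "hello-world"
    print(slugify("  A -- b   c  "))  # -> "a-b-c"
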
/examples/demo_o3d_server.py:
--------------------------------------------------------------------------------
1 | from alfred.vis.mesh3d.o3dsocket import VisOpen3DSocket
2 | from alfred.vis.mesh3d.o3d_visconfig import Config, get_default_visconfig, CONFIG
3 | import keyboard
4 |
5 |
6 | def main():
7 | cfg = get_default_visconfig()
8 | # cfg.body_model.args.body_type = 'smpl'
9 | # cfg.body_model.args.body_type = 'body25'
10 | cfg.body_model.args.body_type = 'h36m'
11 |
12 | server = VisOpen3DSocket(cfg=cfg)
13 | while True:
14 | server.update()
15 | if keyboard.is_pressed("q"):
16 | server.close()
17 | break
18 |
19 |
20 | if __name__ == "__main__":
21 | main()
22 |
--------------------------------------------------------------------------------
/alfred/utils/pprint.py:
--------------------------------------------------------------------------------
1 | from pprint import pprint, pformat
2 |
3 |
4 | def _highlight(code, filename):
5 | try:
6 | import pygments
7 | except ImportError:
8 | return code
9 |
10 | from pygments.lexers import Python3Lexer, YamlLexer
11 | from pygments.formatters import Terminal256Formatter
12 |
13 | lexer = Python3Lexer() if filename.endswith("py") else YamlLexer()
14 | code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai"))
15 | return code
16 |
17 |
18 | def print_colorful(content, format="py"):
19 | content = pformat(content, indent=4)
20 | print(_highlight(content, format))
21 |
--------------------------------------------------------------------------------
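A usage sketch for print_colorful; the config dict is illustrative. With pygments installed the output is syntax highlighted, otherwise it falls back to plain pformat text:

    from alfred.utils.pprint import print_colorful

    cfg = {"lr": 1e-3, "batch_size": 32, "model": {"backbone": "resnet50"}}
    print_colorful(cfg, format="py")
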
/alfred/version.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Lucas Jin. All rights reserved.
2 | from datetime import datetime
3 |
4 | major_num = 2
5 |
6 | __version__ = "3.1.0"
7 | short_version = __version__
8 |
9 |
10 | def parse_version_info(version_str):
11 | version_info = []
12 | for x in version_str.split("."):
13 | if x.isdigit():
14 | version_info.append(int(x))
15 | elif x.find("rc") != -1:
16 | patch_version = x.split("rc")
17 | version_info.append(int(patch_version[0]))
18 | version_info.append(f"rc{patch_version[1]}")
19 | return tuple(version_info)
20 |
21 |
22 | version_info = parse_version_info(__version__)
23 |
--------------------------------------------------------------------------------
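As an illustration, parse_version_info maps plain and release-candidate version strings to tuples:

    from alfred.version import parse_version_info

    print(parse_version_info("3.1.0"))     # (3, 1, 0)
    print(parse_version_info("3.1.0rc1"))  # (3, 1, 0, 'rc1')
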
/alfred/siren/topicgen.py:
--------------------------------------------------------------------------------
1 | def get_events_topic(cid):
2 | return "events/" + cid
3 |
4 |
5 | def get_presence_topic(cid):
6 | return "presence/" + cid
7 |
8 |
9 | def get_personal_events_topic(cid):
10 | return "personalevents/" + cid
11 |
12 |
13 | def get_chatting_topic(cid):
14 | return "messages/" + cid
15 |
16 |
17 | def get_file_chatting_topic(cid):
18 | return "filemessages/" + cid
19 |
20 |
21 | def get_archives_rooms_topic(cid):
22 | return "archivesrooms/" + cid
23 |
24 |
25 | def get_archives_messages_topic(cid):
26 | return "archivesmessages/" + cid
27 |
28 |
29 | def get_archives_myid_topic(cid):
30 | return "archivesmyid/" + cid
31 |
--------------------------------------------------------------------------------
/examples/chatbot.py:
--------------------------------------------------------------------------------
1 | from functools import wraps
2 | from alfred.siren.handler import SirenClient
3 | from alfred.siren.models import ChatMessage, InvitationMessage
4 |
5 | siren = SirenClient("daybreak_account", "password")
6 |
7 |
8 | @siren.on_received_invitation
9 | def on_received_invitation(msg: InvitationMessage):
10 | print("received invitation: ", msg.invitation)
11 | # for a bot, just accept the invitation directly
12 |
13 |
14 | @siren.on_received_chat_message
15 | def on_received_chat_msg(msg: ChatMessage):
16 | print("got new msg: ", msg.text)
17 | siren.publish_txt_msg("I got your message O(∩_∩)O哈哈~", msg.roomId)
18 |
19 |
20 | if __name__ == "__main__":
21 | siren.loop()
22 |
--------------------------------------------------------------------------------
/alfred/vis/mesh3d/assets/sphere_vertices_4.txt:
--------------------------------------------------------------------------------
1 | 0.000 0.000 1.000
2 | 0.000 0.000 -1.000
3 | 0.707 0.000 0.707
4 | 0.500 0.500 0.707
5 | 0.000 0.707 0.707
6 | -0.500 0.500 0.707
7 | -0.707 0.000 0.707
8 | -0.500 -0.500 0.707
9 | -0.000 -0.707 0.707
10 | 0.500 -0.500 0.707
11 | 1.000 0.000 0.000
12 | 0.707 0.707 0.000
13 | 0.000 1.000 0.000
14 | -0.707 0.707 0.000
15 | -1.000 0.000 0.000
16 | -0.707 -0.707 0.000
17 | -0.000 -1.000 0.000
18 | 0.707 -0.707 0.000
19 | 0.707 0.000 -0.707
20 | 0.500 0.500 -0.707
21 | 0.000 0.707 -0.707
22 | -0.500 0.500 -0.707
23 | -0.707 0.000 -0.707
24 | -0.500 -0.500 -0.707
25 | -0.000 -0.707 -0.707
26 | 0.500 -0.500 -0.707
27 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/templates/gpl-v3.tmpl:
--------------------------------------------------------------------------------
1 | Copyright (c) ${years} ${owner}.
2 |
3 | This file is part of ${projectname}
4 | (see ${projecturl}).
5 |
6 | This program is free software: you can redistribute it and/or modify
7 | it under the terms of the GNU General Public License as published by
8 | the Free Software Foundation, either version 3 of the License, or
9 | (at your option) any later version.
10 |
11 | This program is distributed in the hope that it will be useful,
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 | GNU General Public License for more details.
15 |
16 | You should have received a copy of the GNU General Public License
17 | along with this program. If not, see .
18 |
--------------------------------------------------------------------------------
/alfred/utils/image_convertor.py:
--------------------------------------------------------------------------------
1 | """
2 |
3 | Image format conversion between popular libraries:
4 |
5 | PIL.Image -> OpenCV
6 | OpenCV -> PIL.Image
7 | Matplotlib.PLT -> OpenCV
8 |
9 | """
10 | from PIL import Image
11 | import numpy as np
12 | import cv2
13 |
14 |
15 | def cv2pil(image, inplace=True):
16 | if inplace:
17 | new_image = image
18 | else:
19 | new_image = image.copy()
20 | if new_image.ndim == 2:
21 | pass
22 | elif new_image.shape[2] == 3:
23 | new_image = new_image[:, :, ::-1]
24 | elif new_image.shape[2] == 4:
25 | new_image = new_image[:, :, [2, 1, 0, 3]]
26 | new_image = Image.fromarray(new_image)
27 | return new_image
28 |
29 |
30 | def pil2cv(pil_image):
31 | new_image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
32 | return new_image
33 |
--------------------------------------------------------------------------------
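A round-trip usage sketch for the converters above (the image path is illustrative):

    import cv2
    from alfred.utils.image_convertor import cv2pil, pil2cv

    bgr = cv2.imread("demo.jpg")          # OpenCV image, BGR ndarray
    pil_img = cv2pil(bgr, inplace=False)  # -> RGB PIL.Image
    bgr_again = pil2cv(pil_img)           # -> back to BGR ndarray
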
/alfred/modules/cabinet/templates/agpl-v3.tmpl:
--------------------------------------------------------------------------------
1 | Copyright (c) ${years} ${owner}.
2 |
3 | This file is part of ${projectname}
4 | (see ${projecturl}).
5 |
6 | This program is free software: you can redistribute it and/or modify
7 | it under the terms of the GNU Affero General Public License as
8 | published by the Free Software Foundation, either version 3 of the
9 | License, or (at your option) any later version.
10 |
11 | This program is distributed in the hope that it will be useful,
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 | GNU Affero General Public License for more details.
15 |
16 | You should have received a copy of the GNU Affero General Public License
17 | along with this program. If not, see .
18 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/templates/lgpl-v3.tmpl:
--------------------------------------------------------------------------------
1 | Copyright (c) ${years} ${owner}.
2 |
3 | This file is part of ${projectname}
4 | (see ${projecturl}).
5 |
6 | This program is free software: you can redistribute it and/or modify
7 | it under the terms of the GNU Lesser General Public License as published by
8 | the Free Software Foundation, either version 3 of the License, or
9 | (at your option) any later version.
10 |
11 | This program is distributed in the hope that it will be useful,
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 | GNU Lesser General Public License for more details.
15 |
16 | You should have received a copy of the GNU Lesser General Public License
17 | along with this program. If not, see .
18 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/templates/lgpl-v2.1.tmpl:
--------------------------------------------------------------------------------
1 | Copyright (c) ${years} ${owner}.
2 |
3 | This file is part of ${projectname}
4 | (see ${projecturl}).
5 |
6 | This program is free software: you can redistribute it and/or modify
7 | it under the terms of the GNU Lesser General Public License as published by
8 | the Free Software Foundation, either version 2.1 of the License, or
9 | (at your option) any later version.
10 |
11 | This program is distributed in the hope that it will be useful,
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 | GNU Lesser General Public License for more details.
15 |
16 | You should have received a copy of the GNU Lesser General Public License
17 | along with this program. If not, see .
18 |
--------------------------------------------------------------------------------
/alfred/utils/cv_wrapper.py:
--------------------------------------------------------------------------------
1 | """
2 |
3 | Some OpenCV wrappers to make video inference
4 | simpler.
5 |
6 | """
7 | import cv2
8 | from PIL import Image, ImageFont, ImageDraw
9 | import os
10 | import numpy as np
11 |
12 |
13 | font_f = os.path.join(
14 | os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "fonts/FZSSJW.TTF"
15 | )
16 |
17 |
18 | def put_cn_txt_on_img(img, txt, ori, font_scale, color):
19 | """
20 | put Chinese text on image
21 | """
22 | img_PIL = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
23 | assert os.path.exists(font_f), "{} not found".format(font_f)
24 | font = ImageFont.truetype(font_f, 25)
25 | fillColor = color # (255,0,0)
26 | position = ori # (100,100)
27 | draw = ImageDraw.Draw(img_PIL)
28 | draw.text(position, txt, font=font, fill=fillColor)
29 | img = cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR)
30 | return img
31 |
--------------------------------------------------------------------------------
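A usage sketch for put_cn_txt_on_img; the image path and text are illustrative:

    import cv2
    from alfred.utils.cv_wrapper import put_cn_txt_on_img

    img = cv2.imread("demo.jpg")
    img = put_cn_txt_on_img(img, "你好，世界", (30, 30), 1.0, (255, 0, 0))
    cv2.imwrite("demo_txt.jpg", img)
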
/alfred/modules/cabinet/templates/apache-2.tmpl:
--------------------------------------------------------------------------------
1 | Copyright (c) ${years} ${owner}.
2 |
3 | This file is part of ${projectname}
4 | (see ${projecturl}).
5 |
6 | Licensed to the Apache Software Foundation (ASF) under one
7 | or more contributor license agreements. See the NOTICE file
8 | distributed with this work for additional information
9 | regarding copyright ownership. The ASF licenses this file
10 | to you under the Apache License, Version 2.0 (the
11 | "License"); you may not use this file except in compliance
12 | with the License. You may obtain a copy of the License at
13 |
14 | http://www.apache.org/licenses/LICENSE-2.0
15 |
16 | Unless required by applicable law or agreed to in writing,
17 | software distributed under the License is distributed on an
18 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
19 | KIND, either express or implied. See the License for the
20 | specific language governing permissions and limitations
21 | under the License.
22 |
--------------------------------------------------------------------------------
/alfred/vis/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 |
--------------------------------------------------------------------------------
/alfred/fusion/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 |
--------------------------------------------------------------------------------
/alfred/utils/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 |
--------------------------------------------------------------------------------
/alfred/vis/image/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 |
--------------------------------------------------------------------------------
/alfred/modules/data/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 |
--------------------------------------------------------------------------------
/alfred/vis/pointcloud/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/torch/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 |
--------------------------------------------------------------------------------
/alfred/modules/data/labelone_view.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/inference/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/torch/ops/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/torch/nn/modules/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 |
--------------------------------------------------------------------------------
/alfred/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (c) 2020 JinTian.
4 | #
5 | # This file is part of alfred
6 | # (see http://jinfagang.github.io).
7 | #
8 | # Licensed to the Apache Software Foundation (ASF) under one
9 | # or more contributor license agreements. See the NOTICE file
10 | # distributed with this work for additional information
11 | # regarding copyright ownership. The ASF licenses this file
12 | # to you under the Apache License, Version 2.0 (the
13 | # "License"); you may not use this file except in compliance
14 | # with the License. You may obtain a copy of the License at
15 | #
16 | # http://www.apache.org/licenses/LICENSE-2.0
17 | #
18 | # Unless required by applicable law or agreed to in writing,
19 | # software distributed under the License is distributed on an
20 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
21 | # KIND, either express or implied. See the License for the
22 | # specific language governing permissions and limitations
23 | # under the License.
24 | #
25 |
--------------------------------------------------------------------------------
/alfred/modules/scrap/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (c) 2020 JinTian.
4 | #
5 | # This file is part of alfred
6 | # (see http://jinfagang.github.io).
7 | #
8 | # Licensed to the Apache Software Foundation (ASF) under one
9 | # or more contributor license agreements. See the NOTICE file
10 | # distributed with this work for additional information
11 | # regarding copyright ownership. The ASF licenses this file
12 | # to you under the Apache License, Version 2.0 (the
13 | # "License"); you may not use this file except in compliance
14 | # with the License. You may obtain a copy of the License at
15 | #
16 | # http://www.apache.org/licenses/LICENSE-2.0
17 | #
18 | # Unless required by applicable law or agreed to in writing,
19 | # software distributed under the License is distributed on an
20 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
21 | # KIND, either express or implied. See the License for the
22 | # specific language governing permissions and limitations
23 | # under the License.
24 | #
25 |
--------------------------------------------------------------------------------
/alfred/modules/text/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (c) 2020 JinTian.
4 | #
5 | # This file is part of alfred
6 | # (see http://jinfagang.github.io).
7 | #
8 | # Licensed to the Apache Software Foundation (ASF) under one
9 | # or more contributor license agreements. See the NOTICE file
10 | # distributed with this work for additional information
11 | # regarding copyright ownership. The ASF licenses this file
12 | # to you under the Apache License, Version 2.0 (the
13 | # "License"); you may not use this file except in compliance
14 | # with the License. You may obtain a copy of the License at
15 | #
16 | # http://www.apache.org/licenses/LICENSE-2.0
17 | #
18 | # Unless required by applicable law or agreed to in writing,
19 | # software distributed under the License is distributed on an
20 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
21 | # KIND, either express or implied. See the License for the
22 | # specific language governing permissions and limitations
23 | # under the License.
24 | #
25 |
--------------------------------------------------------------------------------
/alfred/modules/vision/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (c) 2020 JinTian.
4 | #
5 | # This file is part of alfred
6 | # (see http://jinfagang.github.io).
7 | #
8 | # Licensed to the Apache Software Foundation (ASF) under one
9 | # or more contributor license agreements. See the NOTICE file
10 | # distributed with this work for additional information
11 | # regarding copyright ownership. The ASF licenses this file
12 | # to you under the Apache License, Version 2.0 (the
13 | # "License"); you may not use this file except in compliance
14 | # with the License. You may obtain a copy of the License at
15 | #
16 | # http://www.apache.org/licenses/LICENSE-2.0
17 | #
18 | # Unless required by applicable law or agreed to in writing,
19 | # software distributed under the License is distributed on an
20 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
21 | # KIND, either express or implied. See the License for the
22 | # specific language governing permissions and limitations
23 | # under the License.
24 | #
25 |
--------------------------------------------------------------------------------
/examples/projection_cal.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | P = "7.215377e+02 0.000000e+00 6.095593e+02 0.000000e+00 0.000000e+00 7.215377e+02 1.728540e+02 0.000000e+00 0.000000e+00 0.000000e+00 1.000000e+00 0.000000e+00"
5 | K = "9.842439e+02 0.000000e+00 6.900000e+02 0.000000e+00 9.808141e+02 2.331966e+02 0.000000e+00 0.000000e+00 1.000000e+00"
6 | R = "9.999239e-01 9.837760e-03 -7.445048e-03 -9.869795e-03 9.999421e-01 -4.278459e-03 7.402527e-03 4.351614e-03 9.999631e-01"
7 | T = "2.573699e-16 -1.059758e-16 1.614870e-16"
8 |
9 |
10 | def get_m_from_str(s):
11 | ss = [np.float32(i) for i in s.split(" ")]
12 | ll = len(ss)
13 | ss = np.array(ss)
14 | ss = ss.reshape((3, ll // 3))
15 | return ss
16 |
17 |
18 | p = get_m_from_str(P)
19 | print(p)
20 |
21 | k = get_m_from_str(K)
22 | r = get_m_from_str(R)
23 | t = get_m_from_str(T)
24 |
25 | r = np.vstack((r, [[0, 0, 0]]))  # pad R to 4x3
26 | t = np.vstack((t, [[1]]))  # pad T to a homogeneous 4x1 column
27 | rt = np.hstack([r, t])  # 4x4 extrinsic [R|T]
28 | k = np.hstack([k, [[0], [0], [0]]])  # 3x4 intrinsic [K|0]
29 |
30 | print(rt)
31 | print(k)
32 | c_p = np.dot(k, rt)  # camera projection matrix P = K [R|T]
33 | print(c_p)
34 |
--------------------------------------------------------------------------------
/examples/vis_coco.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | from alfred.vis.image.get_dataset_label_map import coco_label_map_list
25 |
26 |
27 | a = coco_label_map_list
28 | print(a)
29 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/mdparse/formatters/pdf.py:
--------------------------------------------------------------------------------
1 | """
2 | PDF formatter.
3 | """
4 |
5 | from markdown import markdown
6 | import weasyprint
7 |
8 |
9 | class PDFFormatter:
10 | """
11 | Writes lines into a PDF.
12 | """
13 |
14 | format = "pdf"
15 |
16 | @staticmethod
17 | def _fetcher(url):
18 | return weasyprint.default_url_fetcher(url, timeout=1)
19 |
20 | @staticmethod
21 | def write(lines):
22 | return weasyprint.HTML(
23 | string=markdown("".join(lines), output_format="html"),
24 | url_fetcher=PDFFormatter._fetcher,
25 | ).write_pdf()
26 |
27 | # with BytesIO() as result:
28 | # pisa.pisaDocument(markdown(''.join(lines), output_format='html'), dest=result,
29 | # encoding='utf8',
30 | # link_callback=PDFFormatter._link_callback)
31 | # result.seek(0)
32 | # data = result.read()
33 | #
34 | # return data
35 |
36 | # pdfkit.from_string(markdown(''.join(lines), output_format='html'), 'fuck.pdf')
37 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/torch/nn/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | from .functional import one_hot
25 | from .modules.common import Empty, Sequential
26 | from .modules.normalization import GroupNorm
27 |
--------------------------------------------------------------------------------
/alfred/utils/math_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # For licensing see accompanying LICENSE file.
3 | # Copyright (C) 2022 Apple Inc. All Rights Reserved.
4 | #
5 |
6 | from typing import Union, Optional
7 |
8 |
9 | def make_divisible(
10 | v: Union[float, int],
11 | divisor: Optional[int] = 8,
12 | min_value: Optional[Union[float, int]] = None,
13 | ) -> Union[float, int]:
14 | """
15 | This function is taken from the original tf repo.
16 | It ensures that all layers have a channel number that is divisible by 8
17 | It can be seen here:
18 | https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
19 | :param v:
20 | :param divisor:
21 | :param min_value:
22 | :return:
23 | """
24 | if min_value is None:
25 | min_value = divisor
26 | new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
27 | # Make sure that round down does not go down by more than 10%.
28 | if new_v < 0.9 * v:
29 | new_v += divisor
30 | return new_v
31 |
32 |
33 | def bound_fn(
34 | min_val: Union[float, int], max_val: Union[float, int], value: Union[float, int]
35 | ) -> Union[float, int]:
36 | return max(min_val, min(max_val, value))
37 |
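# Usage sketch (illustrative): round a channel count up to the nearest
# multiple of the divisor, as MobileNet-style layers expect.
from alfred.utils.math_utils import make_divisible, bound_fn

assert make_divisible(37) == 40        # 37 rounded up to a multiple of 8
assert bound_fn(0.0, 1.0, 1.7) == 1.0  # clamp 1.7 into [0, 1]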
--------------------------------------------------------------------------------
/alfred/vis/mesh3d/default_viscfg.yml:
--------------------------------------------------------------------------------
1 | host: '127.0.0.1'
2 | port: 9999
3 |
4 | width: 1920
5 | height: 1080
6 |
7 | max_human: 10
8 | track: True
9 | filter: False
10 | block: False # whether to block visualization; True to visualize each frame, False for realtime applications
11 | rotate: False
12 | debug: False
13 | write: False
14 | out: 'none'
15 |
16 | body_model:
17 | module: "alfred.vis.mesh3d.skelmodel.SkelModel"
18 | args:
19 | body_type: "body25"
20 | joint_radius: 0.02
21 |
22 | camera:
23 | phi: 0
24 | theta: -30
25 | cx: 3.
26 | cy: 5.
27 | cz: 10.
28 |
29 | scene:
30 | - module: "alfred.vis.mesh3d.o3dwrapper.create_coord"
31 | args:
32 | camera: [0, 0, 0]
33 | radius: 1.6
34 | scale: 1.0
35 | - module: "alfred.vis.mesh3d.o3dwrapper.create_ground"
36 | args:
37 | center: [0, 0, 0]
38 | # xdir: [0.02, 0, 0]
39 | # ydir: [0, 0.02, 0]
40 | xdir: [1, 0, 0]
41 | ydir: [0, 1, 0]
42 | step: 1
43 | xrange: 5
44 | yrange: 5
45 | white: [1., 1., 1.]
46 | black: [0.5,0.5,0.5]
47 | two_sides: True
48 |
49 | range:
50 | minr: [-100, -100, -100]
51 | maxr: [ 100, 100, 100]
52 | rate_inlier: 0.8
53 | min_conf: 0.1
--------------------------------------------------------------------------------
/alfred/vis/mesh3d/assets/sphere_faces_4.txt:
--------------------------------------------------------------------------------
1 | 0 2 3
2 | 1 19 18
3 | 0 3 4
4 | 1 20 19
5 | 0 4 5
6 | 1 21 20
7 | 0 5 6
8 | 1 22 21
9 | 0 6 7
10 | 1 23 22
11 | 0 7 8
12 | 1 24 23
13 | 0 8 9
14 | 1 25 24
15 | 0 9 2
16 | 1 18 25
17 | 10 3 2
18 | 10 11 3
19 | 11 4 3
20 | 11 12 4
21 | 12 5 4
22 | 12 13 5
23 | 13 6 5
24 | 13 14 6
25 | 14 7 6
26 | 14 15 7
27 | 15 8 7
28 | 15 16 8
29 | 16 9 8
30 | 16 17 9
31 | 17 2 9
32 | 17 10 2
33 | 18 11 10
34 | 18 19 11
35 | 19 12 11
36 | 19 20 12
37 | 20 13 12
38 | 20 21 13
39 | 21 14 13
40 | 21 22 14
41 | 22 15 14
42 | 22 23 15
43 | 23 16 15
44 | 23 24 16
45 | 24 17 16
46 | 24 25 17
47 | 25 10 17
48 | 25 18 10
49 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/tf/common.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | import os
25 |
26 |
27 | def mute_tf():
28 | """
29 | this function will mute tensorflow
30 | disable tensorflow logging information
31 | call this before you import tensorflow
32 | """
33 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
34 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/changesource.py:
--------------------------------------------------------------------------------
1 | import platform
2 | import os
3 |
4 |
5 | def mkdir(path):
6 | e = os.path.exists(path)
7 | if not e:
8 | os.makedirs(path)
9 | return True
10 | else:
11 | return False
12 |
13 |
14 | def mkfile(filePath):
15 | pipfile = "[global]\ntrusted-host=mirrors.aliyun.com\nindex-url=http://mirrors.aliyun.com/pypi/simple/"
16 | if os.path.exists(filePath):
17 | if str(input("File exists! Overwrite? (Y/N) ")).upper() == "N":
18 | print("Not overwriting.")
19 | return
20 | with open(filePath, "w") as fp:
21 | fp.write(pipfile)
22 | print("Write finish.")
23 |
24 |
25 | def change_pypi_source():
26 | systype = platform.system()
27 | print("System type: " + systype)
28 | if systype == "Windows":
29 | path = os.path.join(os.getenv("HOMEPATH"), "pip")
30 | mkdir(path)
31 | mkfile(os.path.join(path, "pip.ini"))
32 | elif systype == "Linux" or systype == "Darwin":
33 | path = os.path.join(os.path.expandvars("$HOME"), ".pip")
34 | mkdir(path)
35 | mkfile(os.path.join(path, "pip.conf"))
36 | else:
37 | print("System type: " + systype + " Not Support!")
38 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/torch/nn/modules/normalization.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | import torch
25 |
26 |
27 | class GroupNorm(torch.nn.GroupNorm):
28 | def __init__(self, num_channels, num_groups, eps=1e-5, affine=True):
29 | super().__init__(
30 | num_groups=num_groups, num_channels=num_channels, eps=eps, affine=affine
31 | )
32 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/torch/train/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | from .checkpoint import (
25 | latest_checkpoint,
26 | restore,
27 | restore_latest_checkpoints,
28 | restore_models,
29 | save,
30 | save_models,
31 | try_restore_latest_checkpoints,
32 | )
33 | from .common import create_folder
34 | from .optim import MixedPrecisionWrapper
35 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/torch/gpu.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import torch
4 |
5 | __all__ = ["get_gpu_prop", "collect_gpu_info"]
6 |
7 |
8 | dirname = os.path.dirname(__file__)
9 |
10 |
11 | def get_gpu_devices_count():
12 | return torch.cuda.device_count()
13 |
14 |
15 | def get_gpu_prop(show=True):
16 | ngpus = torch.cuda.device_count()
17 |
18 | properties = []
19 | for dev in range(ngpus):
20 | prop = torch.cuda.get_device_properties(dev)
21 | properties.append(
22 | {
23 | "name": prop.name,
24 | "capability": [prop.major, prop.minor],
25 | # unit GB
26 | "total_momory": round(prop.total_memory / 1073741824, 2),
27 | "sm_count": prop.multi_processor_count,
28 | }
29 | )
30 |
31 | if show:
32 | print("cuda: {}".format(torch.cuda.is_available()))
33 | print("available GPU(s): {}".format(ngpus))
34 | for i, p in enumerate(properties):
35 | print("{}: {}".format(i, p))
36 | return properties
37 |
38 |
39 | def sort(d, tmp={}):
40 | for k in sorted(d.keys()):
41 | if isinstance(d[k], dict):
42 | tmp[k] = {}
43 | sort(d[k], tmp[k])
44 | else:
45 | tmp[k] = d[k]
46 | return tmp
47 |
--------------------------------------------------------------------------------
/alfred/vis/image/face.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 |
5 | def draw_face_landmarks(img, pts, box=None, color=(255, 147, 23), size=8):
6 | if pts is not None:
7 | print(pts.shape)
8 | n = pts.shape[1]
9 | if n <= 106:
10 | for i in range(n):
11 | cv2.circle(
12 | img, (int(round(pts[0, i])), int(round(pts[1, i]))), size, color, -1
13 | )
14 | else:
15 | sep = 1
16 | for i in range(0, n, sep):
17 | cv2.circle(
18 | img, (int(round(pts[0, i])), int(round(pts[1, i]))), 2, color, 1
19 | )
20 |
21 | if box is not None:
22 | line_color = (255, 127, 80)
23 | left, top, right, bottom = np.round(box).astype(np.int32)
24 | left_top = (left, top)
25 | right_top = (right, top)
26 | right_bottom = (right, bottom)
27 | left_bottom = (left, bottom)
28 | cv2.line(img, left_top, right_top, line_color, 1, cv2.LINE_AA)
29 | cv2.line(img, right_top, right_bottom, line_color, 1, cv2.LINE_AA)
30 | cv2.line(img, right_bottom, left_bottom, line_color, 1, cv2.LINE_AA)
31 | cv2.line(img, left_bottom, left_top, line_color, 1, cv2.LINE_AA)
32 | return img
33 |
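# Usage sketch (illustrative; the image path, landmark values, and box are hypothetical).
# pts is assumed to be a 2xN array: row 0 holds x coordinates, row 1 holds y.
import cv2
import numpy as np
from alfred.vis.image.face import draw_face_landmarks

img = cv2.imread("face.jpg")
pts = np.array([[120.0, 150.0, 180.0], [200.0, 205.0, 210.0]])
img = draw_face_landmarks(img, pts, box=[100, 180, 220, 260])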
--------------------------------------------------------------------------------
/alfred/deprecated/dl/torch/nn/functional.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | import torch
25 |
26 |
27 | def one_hot(tensor, depth, dim=-1, on_value=1.0, dtype=torch.float32):
28 | tensor_onehot = torch.zeros(
29 | *list(tensor.shape), depth, dtype=dtype, device=tensor.device
30 | )
31 | tensor_onehot.scatter_(dim, tensor.unsqueeze(dim).long(), on_value)
32 | return tensor_onehot
33 |
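# Usage sketch (illustrative): expand integer class labels into a one-hot tensor.
import torch
from alfred.deprecated.dl.torch.nn.functional import one_hot

labels = torch.tensor([0, 2, 1])
print(one_hot(labels, depth=3))  # shape (3, 3), 1.0 at each label position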
--------------------------------------------------------------------------------
/upload_pypi.sh:
--------------------------------------------------------------------------------
1 | ##
2 | ## Copyright (c) 2020 JinTian.
3 | ##
4 | ## This file is part of alfred
5 | ## (see http://jinfagang.github.io).
6 | ##
7 | ## Licensed to the Apache Software Foundation (ASF) under one
8 | ## or more contributor license agreements. See the NOTICE file
9 | ## distributed with this work for additional information
10 | ## regarding copyright ownership. The ASF licenses this file
11 | ## to you under the Apache License, Version 2.0 (the
12 | ## "License"); you may not use this file except in compliance
13 | ## with the License. You may obtain a copy of the License at
14 | ##
15 | ## http://www.apache.org/licenses/LICENSE-2.0
16 | ##
17 | ## Unless required by applicable law or agreed to in writing,
18 | ## software distributed under the License is distributed on an
19 | ## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | ## KIND, either express or implied. See the License for the
21 | ## specific language governing permissions and limitations
22 | ## under the License.
23 | ##
24 | # check setup is correct or not
25 | python3 setup.py check
26 |
27 | # bumpver update --patch
28 |
29 | sudo rm -r build/
30 | sudo rm -r dist/
31 |
32 | # the old PyPI upload interface is no longer valid
33 | # python3 setup.py sdist
34 | # python3 setup.py sdist upload -r pypi
35 |
36 | # using twine instead
37 | python3 setup.py sdist
38 | twine upload dist/*
39 |
40 |
--------------------------------------------------------------------------------
/alfred/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (c) 2020 JinTian.
4 | #
5 | # This file is part of alfred
6 | # (see http://jinfagang.github.io).
7 | #
8 | # Licensed to the Apache Software Foundation (ASF) under one
9 | # or more contributor license agreements. See the NOTICE file
10 | # distributed with this work for additional information
11 | # regarding copyright ownership. The ASF licenses this file
12 | # to you under the Apache License, Version 2.0 (the
13 | # "License"); you may not use this file except in compliance
14 | # with the License. You may obtain a copy of the License at
15 | #
16 | # http://www.apache.org/licenses/LICENSE-2.0
17 | #
18 | # Unless required by applicable law or agreed to in writing,
19 | # software distributed under the License is distributed on an
20 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
21 | # KIND, either express or implied. See the License for the
22 | # specific language governing permissions and limitations
23 | # under the License.
24 | #
25 | """Bring in all of the public Alfred interface into this module."""
26 | import importlib
27 |
28 | # pylint: disable=g-bad-import-order
29 |
30 | from .modules import *
31 | from .vis import *
32 | from .fusion import *
33 | # from .dl.torch.common import print_shape
34 | from .utils.log import logger
35 | # from .dl.torch.common import device
36 | from .utils.progress import pbar, prange
37 |
38 | globals().update(importlib.import_module("alfred").__dict__)
39 |
--------------------------------------------------------------------------------
/alfred/utils/base_config.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | from yacs.config import CfgNode as CN
3 | import argparse
4 |
5 |
6 | class Config:
7 | @classmethod
8 | def load_from_args(cls):
9 | parser = argparse.ArgumentParser()
10 | parser.add_argument("--cfg", type=str, default="config/base.yml")
11 | parser.add_argument("opts", default=None, nargs=argparse.REMAINDER)
12 | args = parser.parse_args()
13 | return cls.load(filename=args.cfg, opts=args.opts)
14 |
15 | @classmethod
16 | def load(cls, filename=None, opts=[]) -> CN:
17 | cfg = CN()
18 | cfg = cls.init(cfg)
19 | if filename is not None:
20 | cfg.merge_from_file(filename)
21 | if len(opts) > 0:
22 | cfg.merge_from_list(opts)
23 | cls.parse(cfg)
24 | cls.print(cfg)
25 | return cfg
26 |
27 | @staticmethod
28 | def init(cfg):
29 | return cfg
30 |
31 | @staticmethod
32 | def parse(cfg):
33 | pass
34 |
35 | @staticmethod
36 | def print(cfg):
37 | print("[Info] --------------")
38 | print("[Info] Configuration:")
39 | print("[Info] --------------")
40 | print(cfg)
41 |
42 |
43 | def load_object(module_name, module_args):
44 | module_path = ".".join(module_name.split(".")[:-1])
45 | module = importlib.import_module(module_path)
46 | name = module_name.split(".")[-1]
47 | obj = getattr(module, name)(**module_args)
48 | return obj
49 |
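# Usage sketch (illustrative; the `lr` key and the yml path are hypothetical):
from alfred.utils.base_config import Config

class MyConfig(Config):
    @staticmethod
    def init(cfg):
        cfg.lr = 0.001  # default values go here
        return cfg

cfg = MyConfig.load(filename=None)  # or MyConfig.load(filename="config/base.yml")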
--------------------------------------------------------------------------------
/alfred/modules/cabinet/templates/cecill.tmpl:
--------------------------------------------------------------------------------
1 | Copyright © ${owner} ${years}
2 |
3 | This software is governed by the CeCILL license under French law and
4 | abiding by the rules of distribution of free software. You can use,
5 | modify and/ or redistribute the software under the terms of the CeCILL
6 | license as circulated by CEA, CNRS and INRIA at the following URL
7 | "http://www.cecill.info".
8 |
9 | As a counterpart to the access to the source code and rights to copy,
10 | modify and redistribute granted by the license, users are provided only
11 | with a limited warranty and the software's author, the holder of the
12 | economic rights, and the successive licensors have only limited liability.
13 |
14 | In this respect, the user's attention is drawn to the risks associated
15 | with loading, using, modifying and/or developing or reproducing the
16 | software by the user in light of its specific status of free software,
17 | that may mean that it is complicated to manipulate, and that also
18 | therefore means that it is reserved for developers and experienced
19 | professionals having in-depth computer knowledge. Users are therefore
20 | encouraged to load and test the software's suitability as regards their
21 | requirements in conditions enabling the security of their systems and/or
22 | data to be ensured and, more generally, to use and operate it in the
23 | same conditions as regards security.
24 |
25 | The fact that you are presently reading this means that you have had
26 | knowledge of the CeCILL license and that you accept its terms.
27 |
--------------------------------------------------------------------------------
/alfred/utils/progress.py:
--------------------------------------------------------------------------------
1 | import time
2 | from typing import Callable, Iterable, Optional, Sequence, TypeVar, Union
3 | from rich.progress import track
4 |
5 |
6 | ProgressType = TypeVar("ProgressType")
7 | StyleType = Union[str, "Style"]
8 |
9 |
10 | def pbar(
11 | sequence: Union[Sequence[ProgressType], Iterable[ProgressType]],
12 | description: str = "Working...",
13 | total: Optional[float] = None,
14 | auto_refresh: bool = True,
15 | transient: bool = False,
16 | get_time: Optional[Callable[[], float]] = None,
17 | refresh_per_second: float = 10,
18 | style: StyleType = "bar.back",
19 | complete_style: StyleType = "bar.complete",
20 | finished_style: StyleType = "bar.finished",
21 | pulse_style: StyleType = "bar.pulse",
22 | update_period: float = 0.1,
23 | disable: bool = False,
24 | show_speed: bool = True,
25 | ) -> Iterable[ProgressType]:
26 | return track(
27 | sequence=sequence,
28 | description=description,
29 | total=total,
30 | auto_refresh=auto_refresh,
31 | transient=transient,
32 | get_time=get_time,
33 | refresh_per_second=refresh_per_second,
34 | style=style,
35 | complete_style=complete_style,
36 | finished_style=finished_style,
37 | pulse_style=pulse_style,
38 | update_period=update_period,
39 | disable=disable,
40 | show_speed=show_speed,
41 | )
42 |
43 |
44 | def prange(rg: int, description: str = "processing..."):
45 | return pbar(range(rg), description=description)
46 |
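# Usage sketch (illustrative): wrap any iterable (or a length) in a rich progress bar.
from alfred.utils.progress import pbar, prange

for item in pbar(["a", "b", "c"], description="items"):
    pass
for i in prange(100, description="steps"):
    pass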
--------------------------------------------------------------------------------
/alfred/modules/cabinet/templates/cecill-B.tmpl:
--------------------------------------------------------------------------------
1 | Copyright © ${owner} ${years}
2 |
3 | This software is governed by the CeCILL-B license under French law and
4 | abiding by the rules of distribution of free software. You can use,
5 | modify and/ or redistribute the software under the terms of the CeCILL-B
6 | license as circulated by CEA, CNRS and INRIA at the following URL
7 | "http://www.cecill.info".
8 |
9 | As a counterpart to the access to the source code and rights to copy,
10 | modify and redistribute granted by the license, users are provided only
11 | with a limited warranty and the software's author, the holder of the
12 | economic rights, and the successive licensors have only limited liability.
13 |
14 | In this respect, the user's attention is drawn to the risks associated
15 | with loading, using, modifying and/or developing or reproducing the
16 | software by the user in light of its specific status of free software,
17 | that may mean that it is complicated to manipulate, and that also
18 | therefore means that it is reserved for developers and experienced
19 | professionals having in-depth computer knowledge. Users are therefore
20 | encouraged to load and test the software's suitability as regards their
21 | requirements in conditions enabling the security of their systems and/or
22 | data to be ensured and, more generally, to use and operate it in the
23 | same conditions as regards security.
24 |
25 | The fact that you are presently reading this means that you have had
26 | knowledge of the CeCILL-B license and that you accept its terms.
27 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/templates/cecill-C.tmpl:
--------------------------------------------------------------------------------
1 | Copyright © ${owner} ${years}
2 |
3 | This software is governed by the CeCILL-C license under French law and
4 | abiding by the rules of distribution of free software. You can use,
5 | modify and/ or redistribute the software under the terms of the CeCILL-C
6 | license as circulated by CEA, CNRS and INRIA at the following URL
7 | "http://www.cecill.info".
8 |
9 | As a counterpart to the access to the source code and rights to copy,
10 | modify and redistribute granted by the license, users are provided only
11 | with a limited warranty and the software's author, the holder of the
12 | economic rights, and the successive licensors have only limited liability.
13 |
14 | In this respect, the user's attention is drawn to the risks associated
15 | with loading, using, modifying and/or developing or reproducing the
16 | software by the user in light of its specific status of free software,
17 | that may mean that it is complicated to manipulate, and that also
18 | therefore means that it is reserved for developers and experienced
19 | professionals having in-depth computer knowledge. Users are therefore
20 | encouraged to load and test the software's suitability as regards their
21 | requirements in conditions enabling the security of their systems and/or
22 | data to be ensured and, more generally, to use and operate it in the
23 | same conditions as regards security.
24 |
25 | The fact that you are presently reading this means that you have had
26 | knowledge of the CeCILL-C license and that you accept its terms.
27 |
--------------------------------------------------------------------------------
/examples/data/000011.txt:
--------------------------------------------------------------------------------
1 | P0: 7.215377000000e+02 0.000000000000e+00 6.095593000000e+02 0.000000000000e+00 0.000000000000e+00 7.215377000000e+02 1.728540000000e+02 0.000000000000e+00 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00 0.000000000000e+00
2 | P1: 7.215377000000e+02 0.000000000000e+00 6.095593000000e+02 -3.875744000000e+02 0.000000000000e+00 7.215377000000e+02 1.728540000000e+02 0.000000000000e+00 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00 0.000000000000e+00
3 | P2: 7.215377000000e+02 0.000000000000e+00 6.095593000000e+02 4.485728000000e+01 0.000000000000e+00 7.215377000000e+02 1.728540000000e+02 2.163791000000e-01 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00 2.745884000000e-03
4 | P3: 7.215377000000e+02 0.000000000000e+00 6.095593000000e+02 -3.395242000000e+02 0.000000000000e+00 7.215377000000e+02 1.728540000000e+02 2.199936000000e+00 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00 2.729905000000e-03
5 | R0_rect: 9.999239000000e-01 9.837760000000e-03 -7.445048000000e-03 -9.869795000000e-03 9.999421000000e-01 -4.278459000000e-03 7.402527000000e-03 4.351614000000e-03 9.999631000000e-01
6 | Tr_velo_to_cam: 7.533745000000e-03 -9.999714000000e-01 -6.166020000000e-04 -4.069766000000e-03 1.480249000000e-02 7.280733000000e-04 -9.998902000000e-01 -7.631618000000e-02 9.998621000000e-01 7.523790000000e-03 1.480755000000e-02 -2.717806000000e-01
7 | Tr_imu_to_velo: 9.999976000000e-01 7.553071000000e-04 -2.035826000000e-03 -8.086759000000e-01 -7.854027000000e-04 9.998898000000e-01 -1.482298000000e-02 3.195559000000e-01 2.024406000000e-03 1.482454000000e-02 9.998881000000e-01 -7.997231000000e-01
8 |
9 |
--------------------------------------------------------------------------------
/alfred/utils/communicate.py:
--------------------------------------------------------------------------------
1 | import socket
2 | import numpy as np
3 |
4 |
5 | class LocalSocketExchanger:
6 | """
7 | Send and receive data between two programs.
8 | """
9 |
10 | def __init__(self, ip="127.0.0.1", port=5005, is_server=True) -> None:
11 | self.MAX_BUFFER_SIZE = 20480
12 |
13 | self.ip = ip
14 | self.port = port
15 | self.did_received_data_func = None
16 | self.is_server = is_server
17 | self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
18 | self._connect()
19 |
20 | def _connect(self):
21 | if self.is_server:
22 | self.socket.bind((self.ip, self.port))
23 | self.socket.listen(3)
24 | self.conn, addr = self.socket.accept()
25 | print("Server is ready. ", addr)
26 | else:
27 | self.socket.connect((self.ip, self.port))
28 |
29 | def send(self, data):
30 | """
31 | send any type of data, it might be List, NDArray, etc
32 | """
33 | if not self.is_server:
34 | self.socket.send(data)
35 | else:
36 | print("server can not send data for now.")
37 |
38 | def did_received_data(self, func):
39 | self.did_received_data_func = func
40 |
41 | def receive(self):
42 | data = self.conn.recv(self.MAX_BUFFER_SIZE)
43 | a = np.frombuffer(data, dtype=np.float32)
44 | if len(a) > 0:
45 | # print("receive data: ", a, a.shape)
46 | if self.did_received_data_func:
47 | self.did_received_data_func(a)
48 |
49 | def listen_and_loop(self):
50 | while True:
51 | self.receive()
52 |
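# Usage sketch (illustrative; run the two halves in separate processes):
import numpy as np
from alfred.utils.communicate import LocalSocketExchanger

# server side: blocks until a client connects, then prints every received array
# server = LocalSocketExchanger(is_server=True)
# server.did_received_data(lambda arr: print(arr))
# server.listen_and_loop()

# client side:
# client = LocalSocketExchanger(is_server=False)
# client.send(np.zeros(3, dtype=np.float32).tobytes())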
--------------------------------------------------------------------------------
/alfred/deprecated/dl/data/meta/concatenated_dataset.py:
--------------------------------------------------------------------------------
1 | from .sliceable_dataset import SliceableDataset
2 |
3 |
4 | class ConcatenatedDataset(SliceableDataset):
5 | """A sliceable version of :class:`chainer.datasets.ConcatenatedDataset`.
6 |
7 | Here is an example.
8 |
9 | >>> dataset_a = TupleDataset([0, 1, 2], [0, 1, 4])
10 | >>> dataset_b = TupleDataset([3, 4, 5], [9, 16, 25])
11 | >>>
12 | >>> dataset = ConcatenatedDataset(dataset_a, dataset_b)
13 | >>> dataset.slice[:, 0][:] # [0, 1, 2, 3, 4, 5]
14 |
15 | Args:
16 | datasets: The underlying datasets.
17 | Each dataset should inherit
18 | :class:`~chainercv.chainer_experimental.datasets.sliceable.SliceableDataset`
19 | and should have the same keys.
20 | """
21 |
22 | def __init__(self, *datasets):
23 | if len(datasets) == 0:
24 | raise ValueError("At least one dataset is required")
25 | self._datasets = datasets
26 | self._keys = datasets[0].keys
27 | for dataset in datasets[1:]:
28 | if not dataset.keys == self._keys:
29 | raise ValueError("All datasets should have the same keys")
30 |
31 | def __len__(self):
32 | return sum(len(dataset) for dataset in self._datasets)
33 |
34 | @property
35 | def keys(self):
36 | return self._keys
37 |
38 | def get_example_by_keys(self, index, key_indices):
39 | if index < 0:
40 | raise IndexError
41 | for dataset in self._datasets:
42 | if index < len(dataset):
43 | return dataset.get_example_by_keys(index, key_indices)
44 | index -= len(dataset)
45 | raise IndexError
46 |
--------------------------------------------------------------------------------
/alfred/modules/data/split_voc.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | import os
25 | import glob
26 | import numpy as np
27 |
28 | save_dir = "./ImageSets/Main"
29 | os.makedirs(save_dir, exist_ok=True)
30 |
31 | all_imgs = [
32 | os.path.basename(i).split(".")[0] + "\n" for i in glob.glob("./JPEGImages/*.jpg")
33 | ]
34 |
35 | ratio = 0.9
36 | print("Found {} images, spliting ratio is 0.9".format(len(all_imgs)))
37 |
38 | np.random.shuffle(all_imgs)
39 | split = int(len(all_imgs) * ratio)
40 | train_ids = all_imgs[:split]
41 | val_ids = all_imgs[split:]
42 | print("{} for train, {} for validation.".format(len(train_ids), len(val_ids)))
43 |
44 | print("saving split..")
45 | with open(os.path.join(save_dir, "train.txt"), "w") as f:
46 | f.writelines(train_ids)
47 | with open(os.path.join(save_dir, "val.txt"), "w") as f:
48 | f.writelines(val_ids)
49 | print("Done.")
50 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/count_file.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | """
25 |
26 | count how many certain files under dir
27 |
28 | """
29 | import os
30 | import glob
31 | from alfred.utils.log import logger as logging
32 |
33 |
34 | def count_file(d, f_type):
35 | assert os.path.exists(d), "{} not exist.".format(d)
36 | # f_type can be jpg,png,pdf etc, connected by comma
37 | all_types = f_type.split(",")
38 | logging.info("count all file types: {} under: {}".format(all_types, d))
39 | all_files = []
40 | for t in all_types:
41 | t = t.replace(".", "")
42 | one = glob.glob(os.path.join(d, "*.{}".format(t)))
43 | one = [i for i in one if os.path.isfile(i)]
44 | logging.info("{} num: {}".format(t, len(one)))
45 | all_files.extend(one)
46 | logging.info("file types: {}, total num: {}".format(all_types, len(all_files)))
47 |
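# Usage sketch (illustrative; the directory path is hypothetical):
from alfred.modules.cabinet.count_file import count_file

count_file("./images", "jpg,png")  # logs a per-type count and the total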
--------------------------------------------------------------------------------
/alfred/deprecated/dl/torch/train/common.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | import datetime
25 | import os
26 | import shutil
27 |
28 |
29 | def create_folder(prefix, add_time=True, add_str=None, delete=False):
30 | additional_str = ""
31 | if delete is True:
32 | if os.path.exists(prefix):
33 | shutil.rmtree(prefix)
34 | os.makedirs(prefix)
35 | folder = prefix
36 | if add_time is True:
37 | # additional_str has a form such as '170903_220351'
38 | additional_str += datetime.datetime.now().strftime("%y%m%d_%H%M%S")
39 | if add_str is not None:
40 | folder += "/" + additional_str + "_" + add_str
41 | else:
42 | folder += "/" + additional_str
43 | if delete is True:
44 | if os.path.exists(folder):
45 | shutil.rmtree(folder)
46 | os.makedirs(folder)
47 | return folder
48 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/face_crop.py:
--------------------------------------------------------------------------------
1 | try:
2 | import face_recognition
3 | import os
4 | except ImportError as e:
5 | pass
6 | import glob
7 | from PIL import Image
8 |
9 |
10 | def extract_and_save_face(image_path):
11 | if os.path.isdir(image_path):
12 | # Handle multiple file types
13 | images = glob.glob(os.path.join(image_path, "*"))
14 | images = [img for img in images if img.endswith((".png", ".jpg", ".jpeg"))]
15 | else:
16 | images = [image_path]
17 |
18 | for image_path in images:
19 | # Load the image
20 | image = face_recognition.load_image_file(image_path)
21 |
22 | # Find face locations
23 | face_locations = face_recognition.face_locations(image)
24 |
25 | ratio = 0.35
26 |
27 | for i, face_location in enumerate(face_locations):
28 | print(f"{i} {face_location}")
29 | top, right, bottom, left = face_location
30 | # left, top, right, bottom = face_location
31 |
32 | # Expand the box by `ratio` on each side
33 | height, width = bottom - top, right - left
34 | top = max(0, int(top - ratio * height))
35 | left = max(0, int(left - ratio * width))
36 | bottom = min(image.shape[0], int(bottom + ratio * height))
37 | right = min(image.shape[1], int(right + ratio * width))
38 |
39 | # Crop the face from the image
40 | face_image = image[top:bottom, left:right]
41 |
42 | # Save the image
43 | pil_image = Image.fromarray(face_image)
44 | pil_image.save(f"{image_path[:-4]}_{i}.jpg")
45 |
46 | if len(face_locations) == 0:
47 | print(f"No face detected. in: {image_path}")
48 |
49 | print("done!")
50 |
51 |
52 | # Use the function
53 | # extract_and_save_face("example.jpg")
54 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/stack_imgs.py:
--------------------------------------------------------------------------------
1 | """
2 |
3 | stack images in matrix style
4 |
5 | """
6 | import cv2
7 | import numpy as np
8 | from alfred.utils.log import logger as logging
9 |
10 |
11 | def check_shape_resize_if_possible(imgs):
12 | shapes = [i.shape for i in imgs]
13 | if len(set(shapes)) == 1:
14 | return imgs
15 | else:
16 | logging.info("detected images shape not equal, resize to the first shape...")
17 | imgs = [cv2.resize(i, (shapes[0][1], shapes[0][0])) for i in imgs]
18 | return imgs
19 |
20 |
21 | def stack_imgs(imgs_list, dim2d):
22 | """
23 | send a list of images,
24 | then use dim2d to stack them
25 |
26 | for example:
27 | a.png
28 | b.png
29 | c.png
30 | d.png
31 |
32 | dim2d:
33 | 2x2
34 | """
35 | a = int(dim2d.split("x")[0])
36 | b = int(dim2d.split("x")[1])
37 | if len(imgs_list) % a != 0 or len(imgs_list) % b != 0:
38 | logging.info(
39 | "dim2d {} is not applicable for {} images.".format(dim2d, len(imgs_list))
40 | )
41 | exit(0)
42 | elif len(imgs_list) != a * b:
43 | logging.error("len imgs not equal to: axb={}".format(a * b))
44 | exit(0)
45 | else:
46 | imgs_list = [cv2.imread(i) for i in imgs_list]
47 | all_raws = []
48 | # 2x1 bug?
49 | for ri in range(a):
50 | one_raw = []
51 | for ci in range(b):
52 | one_raw.append(imgs_list[ri * b + ci])
53 | logging.info("stacking row: {}, with len: {}".format(ri, len(one_raw)))
54 | imgs = check_shape_resize_if_possible(one_raw)
55 | img_a = np.hstack(imgs)
56 | all_raws.append(img_a)
57 | all_raws = check_shape_resize_if_possible(all_raws)
58 | final_img = np.vstack(all_raws)
59 | logging.info("final combined img shape: {}".format(final_img.shape))
60 | cv2.imwrite("stacked_img.jpg", final_img)
61 | logging.info("done.")
62 |
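# Usage sketch (illustrative; the image paths are hypothetical):
from alfred.modules.cabinet.stack_imgs import stack_imgs

stack_imgs(["a.png", "b.png", "c.png", "d.png"], "2x2")  # writes stacked_img.jpg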
--------------------------------------------------------------------------------
/alfred/utils/mana.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | """
25 | Utils using in MANA universe
26 |
27 | such as print welcome message
28 | """
29 | from colorama import Fore, Back, Style
30 |
31 |
32 | welcome_msg = """
33 | __ ______ _ _____ ___ ____
34 | / |/ / | / | / / | / | / _/
35 | / /|_/ / /| | / |/ / /| | / /| | / /
36 | / / / / ___ |/ /| / ___ |/ ___ |_/ /
37 | /_/ /_/_/ |_/_/ |_/_/ |_/_/ |_/___/ http://manaai.cn
38 | """
39 |
40 |
41 | def welcome(ori_git_url):
42 | print(Fore.YELLOW + Style.BRIGHT + "Welcome to MANA AI platform!" + Style.RESET_ALL)
43 | print(Fore.BLUE + Style.BRIGHT + welcome_msg + Style.RESET_ALL)
44 | print(
45 | Style.BRIGHT
46 | + "once you saw this msg, indicates you were back supported by our team!"
47 | + Style.RESET_ALL
48 | )
49 | print(
50 | "the latest updates of our codes always at: {} or {}".format(
51 | ori_git_url, "http://manaai.cn"
52 | )
53 | )
54 | print("NOTE: Our codes distributed from anywhere else were not supported!")
55 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/torch/nn/weights_init.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Open-MMLab. All rights reserved.
2 | import torch.nn as nn
3 |
4 |
5 | def constant_init(module, val, bias=0):
6 | if hasattr(module, "weight") and module.weight is not None:
7 | nn.init.constant_(module.weight, val)
8 | if hasattr(module, "bias") and module.bias is not None:
9 | nn.init.constant_(module.bias, bias)
10 |
11 |
12 | def xavier_init(module, gain=1, bias=0, distribution="normal"):
13 | assert distribution in ["uniform", "normal"]
14 | if distribution == "uniform":
15 | nn.init.xavier_uniform_(module.weight, gain=gain)
16 | else:
17 | nn.init.xavier_normal_(module.weight, gain=gain)
18 | if hasattr(module, "bias") and module.bias is not None:
19 | nn.init.constant_(module.bias, bias)
20 |
21 |
22 | def normal_init(module, mean=0, std=1, bias=0):
23 | nn.init.normal_(module.weight, mean, std)
24 | if hasattr(module, "bias") and module.bias is not None:
25 | nn.init.constant_(module.bias, bias)
26 |
27 |
28 | def uniform_init(module, a=0, b=1, bias=0):
29 | nn.init.uniform_(module.weight, a, b)
30 | if hasattr(module, "bias") and module.bias is not None:
31 | nn.init.constant_(module.bias, bias)
32 |
33 |
34 | def kaiming_init(
35 | module, a=0, mode="fan_out", nonlinearity="relu", bias=0, distribution="normal"
36 | ):
37 | assert distribution in ["uniform", "normal"]
38 | if distribution == "uniform":
39 | nn.init.kaiming_uniform_(
40 | module.weight, a=a, mode=mode, nonlinearity=nonlinearity
41 | )
42 | else:
43 | nn.init.kaiming_normal_(
44 | module.weight, a=a, mode=mode, nonlinearity=nonlinearity
45 | )
46 | if hasattr(module, "bias") and module.bias is not None:
47 | nn.init.constant_(module.bias, bias)
48 |
49 |
50 | def caffe2_xavier_init(module, bias=0):
51 | # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
52 | # Acknowledgment to FAIR's internal code
53 | kaiming_init(
54 | module, a=1, mode="fan_in", nonlinearity="leaky_relu", distribution="uniform"
55 | )
56 |
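# Usage sketch (illustrative):
import torch.nn as nn
from alfred.deprecated.dl.torch.nn.weights_init import kaiming_init, constant_init

conv = nn.Conv2d(3, 16, kernel_size=3)
kaiming_init(conv, nonlinearity="relu")  # He init for the conv weights
constant_init(nn.BatchNorm2d(16), 1)     # weight=1, bias=0 for the norm layer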
--------------------------------------------------------------------------------
/alfred/deprecated/dl/torch/ops/array_ops.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | import ctypes
25 | import math
26 | import time
27 | import torch
28 |
29 |
30 | def scatter_nd(indices, updates, shape):
31 | """pytorch edition of tensorflow scatter_nd.
32 | this function don't contain except handle code. so use this carefully
33 | when indice repeats, don't support repeat add which is supported
34 | in tensorflow.
35 | """
36 | ret = torch.zeros(*shape, dtype=updates.dtype, device=updates.device)
37 | ndim = indices.shape[-1]
38 | output_shape = list(indices.shape[:-1]) + shape[indices.shape[-1] :]
39 | flatted_indices = indices.view(-1, ndim)
40 | slices = [flatted_indices[:, i] for i in range(ndim)]
41 | slices += [Ellipsis]
42 | ret[slices] = updates.view(*output_shape)
43 | return ret
44 |
45 |
46 | def gather_nd(params, indices):
47 | # this function has a limit that MAX_ADVINDEX_CALC_DIMS=5
48 | ndim = indices.shape[-1]
49 | output_shape = list(indices.shape[:-1]) + list(params.shape[indices.shape[-1] :])
50 | flatted_indices = indices.view(-1, ndim)
51 | slices = [flatted_indices[:, i] for i in range(ndim)]
52 | slices += [Ellipsis]
53 | return params[slices].view(*output_shape)
54 |
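# Usage sketch (illustrative): tensorflow-style gather_nd / scatter_nd on torch tensors.
import torch
from alfred.deprecated.dl.torch.ops.array_ops import gather_nd, scatter_nd

params = torch.arange(12.0).reshape(3, 4)
idx = torch.tensor([[0, 1], [2, 3]])
print(gather_nd(params, idx))                               # tensor([ 1., 11.])
print(scatter_nd(idx, torch.tensor([1.0, 11.0]), [3, 4]))   # zeros with 1. at [0,1], 11. at [2,3]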
--------------------------------------------------------------------------------
/alfred/modules/cabinet/mdparse/www_tools.py:
--------------------------------------------------------------------------------
1 | """
2 | Some functions useful for the working with URLs and network.
3 | """
4 |
5 | import requests
6 | from typing import Optional
7 | import re
8 | import os
9 | from mimetypes import guess_extension
10 | from .string_tools import slugify
11 |
12 |
13 | def is_url(url: str, allowed_url_prefixes=("http", "ftp")) -> bool:
14 | """
15 | Check url for prefix match.
16 | """
17 |
18 | for prefix in set(allowed_url_prefixes):
19 | if url.startswith(prefix):
20 | return True
21 |
22 | return False
23 |
24 |
25 | def download_from_url(url: str, timeout=None):
26 | """
27 | Download file from the URL.
28 | :param url: URL to download.
29 | :param timeout: timeout before fail.
30 | """
31 |
32 | try:
33 | response = requests.get(url, allow_redirects=True, timeout=timeout)
34 | except requests.exceptions.SSLError:
35 | print("Incorrect SSL certificate, trying to download without verifying...")
36 | response = requests.get(
37 | url, allow_redirects=True, verify=False, timeout=timeout
38 | )
39 |
40 | if response.status_code != 200:
41 | raise OSError(str(response))
42 |
43 | return response
44 |
45 |
46 | def get_filename_from_url(req: requests.Response) -> Optional[str]:
47 | """
48 | Get filename from url and, if not found, try to get from content-disposition.
49 | """
50 |
51 | if req.url.find("/"):
52 | result = req.url.rsplit("/", 1)[1]
53 | else:
54 | cd = req.headers.get("content-disposition")
55 |
56 | if cd is None:
57 | return None
58 |
59 | file_name = re.findall("filename=(.+)", cd)
60 |
61 | if len(file_name) == 0:
62 | return None
63 |
64 | result = file_name[0]
65 |
66 | f_name, f_ext = os.path.splitext(result)
67 |
68 | result = (
69 | f'{slugify(f_name)}{guess_extension(req.headers["content-type"].partition(";")[0].strip())}'
70 | if not f_ext
71 | else f"{slugify(f_name)}.{slugify(f_ext)}"
72 | )
73 |
74 | return result
75 |
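A small usage sketch of the helpers above; the URL is a made-up example and network access is assumed:

    url = "https://example.com/files/report.pdf"   # hypothetical URL
    if is_url(url):
        resp = download_from_url(url, timeout=10)
        name = get_filename_from_url(resp) or "downloaded.bin"
        with open(name, "wb") as f:
            f.write(resp.content)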
--------------------------------------------------------------------------------
/alfred/utils/log.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | from loguru import logger
25 | import sys
26 | import time
27 |
28 |
29 | def init_logger():
30 | logger.remove() # Remove the pre-configured handler
31 |     logger.add(
32 | sys.stderr,
33 | format="{level} {time:MM-DD HH:mm:ss} {file}:{line} - {message}",
34 | )
35 |
36 |
37 | def formatter(record):
38 | # package_name = get_package_name()
39 | filename = record["file"].name
40 | if len(record["file"].name) > 17:
41 | filename = record["file"].name[:12] + ".." + record["file"].name[-3:]
42 | record["extra"].update(filename=filename)
43 | return "{time:HH:mm:ss MM.DD} {level} {extra[filename]}:{line}]: {message}\n{exception}"
44 |
45 |
46 | logger.remove() # Remove the pre-configured handler
47 | logger.add(
48 | sys.stderr,
49 | format=formatter,
50 | )
51 |
52 |
53 | def save_log_to_file(f_n):
54 |     logger.remove(handler_id=None)  # clear any previously configured sinks
55 |     # log to a file: utf-8, rotate at midnight, zip old logs, keep 3 days, async writes
56 |     logger.add(
57 |         sink=f"{f_n}_{time.strftime('%Y%m%d_%H%M%S')}.log",
58 | level="INFO",
59 | rotation="00:00",
60 | retention="3 days",
61 | compression="zip",
62 | encoding="utf-8",
63 | enqueue=True,
64 | )
65 |
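A usage sketch, assuming the package is installed so the module is importable as alfred.utils.log:

    from alfred.utils.log import logger, save_log_to_file

    logger.info("hello from the alfred logger")     # formatted by `formatter` above

    # switch to a rotating, zipped file sink (removes the existing sinks first)
    save_log_to_file("train")
    logger.info("this line goes into train_<timestamp>.log")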
--------------------------------------------------------------------------------
/alfred/modules/data/voc2yolo.py:
--------------------------------------------------------------------------------
1 | import xml.etree.ElementTree as ET
2 | import pickle
3 | import os
4 | from os import listdir, getcwd
5 | from os.path import join
6 | import sys
7 | import glob
8 |
9 |
10 | def convert(size, box):
11 | dw = 1.0 / (size[0])
12 | dh = 1.0 / (size[1])
13 | x = (box[0] + box[1]) / 2.0 - 1
14 | y = (box[2] + box[3]) / 2.0 - 1
15 | w = box[1] - box[0]
16 | h = box[3] - box[2]
17 | x = x * dw
18 | w = w * dw
19 | y = y * dh
20 | h = h * dh
21 | return (x, y, w, h)
22 |
23 |
24 | def convert_annotation(xml_f, target_dir, classes_names):
25 | f_name = os.path.basename(xml_f).split(".")[0] + ".txt"
26 | out_file = open(os.path.join(target_dir, f_name), "w")
27 |
28 | tree = ET.parse(xml_f)
29 | root = tree.getroot()
30 | size = root.find("size")
31 | w = int(size.find("width").text)
32 | h = int(size.find("height").text)
33 |
34 | for obj in root.iter("object"):
35 |         difficult = obj.find("difficult").text if obj.find("difficult") is not None else "0"
36 | cls = obj.find("name").text
37 | if cls not in classes_names or int(difficult) == 1:
38 | continue
39 | cls_id = classes_names.index(cls)
40 | xmlbox = obj.find("bndbox")
41 | b = (
42 | float(xmlbox.find("xmin").text),
43 | float(xmlbox.find("xmax").text),
44 | float(xmlbox.find("ymin").text),
45 | float(xmlbox.find("ymax").text),
46 | )
47 | bb = convert((w, h), b)
48 | out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + "\n")
49 |     out_file.close()
50 |
51 | def voc2yolo(img_dir, xml_dir, class_txt):
52 | classes_names = None
53 | if class_txt:
54 | classes_names = [i.strip() for i in open(class_txt, "r").readlines()]
55 |
56 | labels_target = os.path.join(
57 | os.path.dirname(xml_dir.rstrip("/")), "yolo_converted_from_voc"
58 | )
59 | print("labels dir to save: {}".format(labels_target))
60 | if not os.path.exists(labels_target):
61 | os.makedirs(labels_target)
62 |
63 | xmls = glob.glob(os.path.join(xml_dir, "*.xml"))
64 | for xml in xmls:
65 | convert_annotation(xml, labels_target, classes_names)
66 | print("Done!")
67 | print("class name order used is: ", classes_names)
68 |
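A quick numeric check of convert() above (the box and image size are arbitrary):

    # VOC box on a 640x480 image: xmin=100, xmax=300, ymin=200, ymax=400
    x, y, w, h = convert((640, 480), (100.0, 300.0, 200.0, 400.0))
    # x = (100 + 300) / 2 - 1 = 199  -> 199 / 640 ≈ 0.3109
    # y = (200 + 400) / 2 - 1 = 299  -> 299 / 480 ≈ 0.6229
    # w = 200 / 640 = 0.3125,  h = 200 / 480 ≈ 0.4167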
--------------------------------------------------------------------------------
/alfred/tests.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (c) 2020 JinTian.
4 | #
5 | # This file is part of alfred
6 | # (see http://jinfagang.github.io).
7 | #
8 | # Licensed to the Apache Software Foundation (ASF) under one
9 | # or more contributor license agreements. See the NOTICE file
10 | # distributed with this work for additional information
11 | # regarding copyright ownership. The ASF licenses this file
12 | # to you under the Apache License, Version 2.0 (the
13 | # "License"); you may not use this file except in compliance
14 | # with the License. You may obtain a copy of the License at
15 | #
16 | # http://www.apache.org/licenses/LICENSE-2.0
17 | #
18 | # Unless required by applicable law or agreed to in writing,
19 | # software distributed under the License is distributed on an
20 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
21 | # KIND, either express or implied. See the License for the
22 | # specific language governing permissions and limitations
23 | # under the License.
24 | #
25 | from utils.mana import welcome
26 |
27 | from utils.log import logger as logging
28 | from vis.image.det import visualize_det_cv2
29 | import cv2
30 | import numpy as np
31 | from vis.image.get_dataset_label_map import coco_label_map_list
32 | from vis.image.common import draw_rect_with_style
33 | import torch
34 | from dl.torch.common import print_tensor
35 |
36 | from varname import varname
37 |
38 |
39 | def a_func(num):
40 | print(varname() + ": " + str(num))
41 |
42 |
43 | def clothes(func):
44 | def wear():
45 | print("Buy clothes!{}".format(func.__name__))
46 | return func()
47 |
48 | return wear
49 |
50 |
51 | @clothes
52 | def body():
53 |     print("The body feels cold!")
54 |
55 |
56 | if __name__ == "__main__":
57 | v = a_func(1098)
58 |
59 | # welcome('')
60 | # logging.info('hi hiu')
61 | # logging.error('ops')
62 |
63 | # a = cv2.imread('/home/jintian/Pictures/1.jpeg')
64 |
65 | # dets = [
66 | # [1, 0.9, 4, 124, 333, 256],
67 | # [2, 0.7, 155, 336, 367, 485],
68 | # ]
69 | # dets = np.array(dets)
70 | # print(type(a))
71 |
72 | # draw_rect_with_style(a, (78, 478), (478, 223), (0, 255, 255), style='dashed')
73 | # visualize_det_cv2(a, dets, coco_label_map_list, is_show=True)
74 |
75 | aaa = torch.randn([1, 23, 45])
76 | print_tensor(aaa)
77 |
--------------------------------------------------------------------------------
/alfred/fusion/geometry.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | """
25 | 
26 | Conversions between Euler angles and quaternions:
27 | euler_to_quaternion and quaternion_to_euler.
28 | 
29 | """
30 | import numpy as np
31 | import math
32 |
33 |
34 | def euler_to_quaternion(yaw, pitch, roll):
35 | """
36 | return a list of [qx, qy, qz, qw]
37 | """
38 | qx = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(
39 | roll / 2
40 | ) * np.sin(pitch / 2) * np.sin(yaw / 2)
41 | qy = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(
42 | roll / 2
43 | ) * np.cos(pitch / 2) * np.sin(yaw / 2)
44 | qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(
45 | roll / 2
46 | ) * np.sin(pitch / 2) * np.cos(yaw / 2)
47 | qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(
48 | roll / 2
49 | ) * np.sin(pitch / 2) * np.sin(yaw / 2)
50 | return [qx, qy, qz, qw]
51 |
52 |
53 | def quaternion_to_euler(x, y, z, w):
54 | """
55 | return a list of euler angles [yaw, pitch, roll]
56 | """
57 | t0 = +2.0 * (w * x + y * z)
58 | t1 = +1.0 - 2.0 * (x * x + y * y)
59 | roll = math.atan2(t0, t1)
60 | t2 = +2.0 * (w * y - z * x)
61 | t2 = +1.0 if t2 > +1.0 else t2
62 | t2 = -1.0 if t2 < -1.0 else t2
63 | pitch = math.asin(t2)
64 | t3 = +2.0 * (w * z + x * y)
65 | t4 = +1.0 - 2.0 * (y * y + z * z)
66 | yaw = math.atan2(t3, t4)
67 | return [yaw, pitch, roll]
68 |
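A round-trip sanity check for the two functions above (angles in radians, chosen arbitrarily):

    import math

    yaw, pitch, roll = 0.3, -0.2, 0.1
    qx, qy, qz, qw = euler_to_quaternion(yaw, pitch, roll)
    back = quaternion_to_euler(qx, qy, qz, qw)   # ≈ [0.3, -0.2, 0.1]
    assert all(math.isclose(a, b, abs_tol=1e-6) for a, b in zip([yaw, pitch, roll], back))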
--------------------------------------------------------------------------------
/alfred/modules/cabinet/webcam.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2021 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 |
25 | """
26 | 
27 | Show the webcam stream if a USB camera is attached.
28 | Useful for testing whether your camera (or a video file) works.
29 | """
30 | import cv2 as cv
31 | import os
32 | from colorama import Fore, Back, Style
33 |
34 |
35 | def webcam_test(vf):
36 | if vf is not None and os.path.isfile(vf):
37 | print(Fore.CYAN + "webcam on: ", Style.RESET_ALL, vf, " press q to quit.")
38 | cap = cv.VideoCapture(vf)
39 |         while cap.isOpened():
40 |             ret, frame = cap.read()
41 |             if not ret:
42 |                 break
43 | 
44 |             # NOTE: preprocess/model/parse_objects/draw_objects are not defined
45 |             # in this module, so the pose-estimation step is disabled here
46 |             # data = preprocess(frame)
47 |             # cmap, paf = model(data)
48 |             # cmap, paf = cmap.detach().cpu(), paf.detach().cpu()
49 |             # counts, objects, peaks = parse_objects(cmap, paf)
50 |             # draw_objects(frame, counts, objects, peaks)
51 |             cv.imshow("res", frame)
52 |             if cv.waitKey(1) == ord("q"):
53 |                 break
54 | else:
55 | print(Fore.CYAN + "test webcam, press q to quit.", Style.RESET_ALL)
56 | cap = cv.VideoCapture(0)
57 | while cap.isOpened():
58 | ret, frame = cap.read()
59 |
60 | if not ret:
61 | break
62 |
63 | cv.imshow("Webcam", frame)
64 | if cv.waitKey(1) == ord("q"):
65 | break
66 |
--------------------------------------------------------------------------------
/alfred/deploy/tensorrt/calibrator.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | #
16 |
17 | import os
18 | import tensorrt as trt
19 | import numpy as np
20 | import ctypes
21 |
22 | try:
23 | import pycuda.driver as cuda
24 | except ImportError as e:
25 | print("pycuda not installed, calibrator will be disabled.")
26 |
27 |
28 | class Calibrator(trt.IInt8EntropyCalibrator2):
29 | """calibrator
30 | IInt8EntropyCalibrator2
31 | IInt8LegacyCalibrator
32 | IInt8EntropyCalibrator
33 | IInt8MinMaxCalibrator
34 |
35 | """
36 |
37 | def __init__(self, stream, cache_file=""):
38 | trt.IInt8EntropyCalibrator2.__init__(self)
39 | self.stream = stream
40 | self.d_input = cuda.mem_alloc(self.stream.calibration_data.nbytes)
41 | self.cache_file = cache_file
42 | # print(self.cache_file)
43 | stream.reset()
44 |
45 | def get_batch_size(self):
46 | return self.stream.batch_size
47 |
48 | def get_batch(self, names):
49 | batch = self.stream.next_batch()
50 | if not batch.size:
51 | return None
52 |
53 | cuda.memcpy_htod(self.d_input, batch)
54 | return [int(self.d_input)]
55 |
56 | def read_calibration_cache(self):
57 | # If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
58 | if os.path.exists(self.cache_file):
59 | with open(self.cache_file, "rb") as f:
60 | print(f"[INFO] Using calibration cache to save time: {self.cache_file}")
61 | return f.read()
62 |
63 | def write_calibration_cache(self, cache):
64 | with open(self.cache_file, "wb") as f:
65 | print(f"[INFO] Caching calibration data for future use: {self.cache_file}")
66 | f.write(cache)
67 |
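The Calibrator above only relies on the stream exposing batch_size, calibration_data, reset() and next_batch(). Below is a minimal sketch of such a stream; the class name, shapes and internals are illustrative assumptions, not part of TensorRT:

    import numpy as np

    class ImageBatchStream:
        # feeds preprocessed CHW float32 images to the Calibrator (illustrative only)
        def __init__(self, image_arrays, batch_size=8):
            self.batch_size = batch_size
            self.images = image_arrays
            self.calibration_data = np.zeros(
                (batch_size, *image_arrays[0].shape), dtype=np.float32
            )
            self.idx = 0

        def reset(self):
            self.idx = 0

        def next_batch(self):
            if self.idx >= len(self.images):
                return np.array([])          # empty array signals calibration is done
            batch = self.images[self.idx : self.idx + self.batch_size]
            self.idx += self.batch_size
            return np.ascontiguousarray(np.stack(batch).astype(np.float32))

    # calib = Calibrator(ImageBatchStream(images), cache_file="calib.cache")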
--------------------------------------------------------------------------------
/examples/demo_o3d.py:
--------------------------------------------------------------------------------
1 | import socket
2 | import time
3 | from alfred.vis.mesh3d.utils import BaseSocketClient
4 | import os
5 | import numpy as np
6 | import json
7 |
8 |
9 | def send_body25(client):
10 | crt_d = os.path.dirname(__file__)
11 | aa = json.load(open(os.path.join(crt_d, "data/keyp3d.json")))
12 | for d in aa:
13 | pid = d["id"] if "id" in d.keys() else d["personID"]
14 | pose3d = np.array(d["keypoints3d"], dtype=np.float32)
15 | if pose3d.shape[0] > 25:
16 | pose3d[25, :] = pose3d[7, :]
17 | pose3d[46, :] = pose3d[4, :]
18 | if pose3d.shape[1] == 3:
19 | pose3d = np.hstack([pose3d, np.ones((pose3d.shape[0], 1))])
20 | a = [{"id": pid, "keypoints3d": pose3d}]
21 | client.send(a)
22 | time.sleep(0.05)
23 |
24 |
25 | def send_h36m(client):
26 | crt_d = os.path.dirname(__file__)
27 | aa = np.load(os.path.join(crt_d, 'data/X3D.npy'))
28 | for d in aa:
29 | pose3d = np.array(d, dtype=np.float32)[:, [2, 0, 1]]
30 | pose3d[:, 2] = -pose3d[:, 2] + 0.6
31 | pose3d[:, 0] = -pose3d[:, 0]
32 | a = [{
33 | 'id': 0,
34 | 'keypoints3d': pose3d
35 | }]
36 | client.send(a)
37 | time.sleep(0.04)
38 |
39 |
40 | def send_smpl24(client):
41 | crt_d = os.path.dirname(__file__)
42 | aa = json.load(
43 | open("/media/jintian/samsung/source/ai/swarm/toolchains/mmkd/a.json")
44 | )
45 | for d in aa:
46 | pid = d["id"] if "id" in d.keys() else d["personID"]
47 | pose3d = np.array(d["keypoints3d"], dtype=np.float32)
48 | a = [{"id": pid, "keypoints3d": pose3d}]
49 | client.send(a)
50 | time.sleep(0.05)
51 |
52 |
53 | if __name__ == "__main__":
54 | import argparse
55 |
56 | parser = argparse.ArgumentParser()
57 | parser.add_argument('--host', type=str, default='127.0.0.1')
58 | parser.add_argument('--port', type=int, default=9999)
59 | parser.add_argument('--step', type=int, default=1)
60 | parser.add_argument('-t', '--type')
61 | parser.add_argument('--debug', action='store_true')
62 | args = parser.parse_args()
63 |
64 | if args.host == "auto":
65 | args.host = socket.gethostname()
66 | client = BaseSocketClient(args.host, args.port)
67 |
68 | if args.type == 'smpl':
69 | send_smpl24(client)
70 | elif args.type == 'body25':
71 | send_body25(client)
72 | elif args.type == 'h36m':
73 | send_h36m(client)
74 |
--------------------------------------------------------------------------------
/alfred/io/h5_wrapper.py:
--------------------------------------------------------------------------------
1 | """
2 | 
3 | An extremely convenient way to load and save h5 data as Python dicts.
4 | """
5 |
6 | import h5py
7 | import numpy as np
8 |
9 |
10 | def save_h5(path, data):
11 | """Save a Python dict-like object into an HDF5 file."""
12 | with h5py.File(path, "w") as f:
13 | _save_to_group(f, data)
14 |
15 |
16 | def load_h5(path):
17 | """Load an HDF5 file and return it as a Python dict."""
18 | with h5py.File(path, "r") as f:
19 | return _load_from_group(f)
20 |
21 |
22 | # ---------------- internal helpers ---------------- #
23 |
24 |
25 | def _save_to_group(h5grp, obj, name=None):
26 |     # dicts/lists/tuples become sub-groups; arrays become datasets on the
27 |     # parent group under `name`; plain scalars are stored as attributes
28 |     if isinstance(obj, dict):
29 |         group = h5grp if name is None else h5grp.require_group(name)
30 |         group.attrs["__type__"] = "dict"
31 |         for k, v in obj.items():
32 |             _save_to_group(group, v, k)
33 | 
34 |     elif isinstance(obj, (list, tuple)):
35 |         group = h5grp if name is None else h5grp.require_group(name)
36 |         group.attrs["__type__"] = "list" if isinstance(obj, list) else "tuple"
37 |         for i, v in enumerate(obj):
38 |             _save_to_group(group, v, str(i))
39 | 
40 |     elif isinstance(obj, np.ndarray):
41 |         h5grp.create_dataset(name, data=obj)
42 | 
43 |     elif isinstance(obj, (int, float, str, np.number)):
44 |         group = h5grp if name is None else h5grp.require_group(name)
45 |         group.attrs["__scalar__"] = obj
46 | 
47 |     else:
48 |         raise TypeError(f"Unsupported type: {type(obj)}")
49 |
50 |
51 | def _load_from_group(h5grp):
52 | # scalar
53 | if "__scalar__" in h5grp.attrs:
54 | return _convert_scalar(h5grp.attrs["__scalar__"])
55 |
56 | # array
57 | if isinstance(h5grp, h5py.Dataset):
58 | return h5grp[()]
59 |
60 | # complex structure
61 | t = h5grp.attrs.get("__type__", None)
62 |
63 | if t == "dict":
64 | return {k: _load_from_group(h5grp[k]) for k in h5grp.keys()}
65 |
66 | elif t == "list":
67 | # keys are "0","1",...
68 | items = sorted(h5grp.keys(), key=lambda x: int(x))
69 | return [_load_from_group(h5grp[k]) for k in items]
70 |
71 | elif t == "tuple":
72 | items = sorted(h5grp.keys(), key=lambda x: int(x))
73 | return tuple(_load_from_group(h5grp[k]) for k in items)
74 |
75 | else:
76 | # if no type tag: treat as dataset
77 | if isinstance(h5grp, h5py.Dataset):
78 | return h5grp[()]
79 | return {k: _load_from_group(h5grp[k]) for k in h5grp.keys()}
80 |
81 |
82 | def _convert_scalar(x):
83 | if isinstance(x, np.generic):
84 | return x.item()
85 | return x
86 |
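A round-trip example for the wrapper above (assumes write access to the working directory):

    import numpy as np

    data = {
        "boxes": np.random.rand(3, 4).astype(np.float32),
        "meta": {"name": "sample", "score": 0.9},
        "ids": [1, 2, 3],
    }
    save_h5("sample.h5", data)
    restored = load_h5("sample.h5")
    print(restored["meta"]["name"], restored["ids"], restored["boxes"].shape)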
--------------------------------------------------------------------------------
/alfred/modules/cabinet/mdparse/transformers/html/transformer.py:
--------------------------------------------------------------------------------
1 | """
2 | Image extractor for HTML documents.
3 | """
4 | #
5 | # from lxml import html
6 | # from typing import List
7 | #
8 | #
9 | # __all__ = ['ArticleTransformer']
10 | #
11 | #
12 | # class ImgExtractor:
13 | # def run(self, doc):
14 | # """
15 | # Find all images in HTML.
16 | # """
17 | #
18 | # tree = html.fromstring(doc)
19 | # images = tree.xpath('//img/@src')
20 | # # links = tree.xpath('//a/@href')
21 | #
22 | # return images
23 | #
24 | #
25 | #
26 | # class ImgExtExtension(Extension):
27 | # def extendMarkdown(self, md, md_globals):
28 | # img_ext = ImgExtractor(md)
29 | # md.treeprocessors.register(img_ext, 'imgext', 20)
30 | #
31 | #
32 | # class ArticleTransformer:
33 | # """
34 | # Markdown article transformation class.
35 | # """
36 | #
37 | # def __init__(self, article_path: str, image_downloader):
38 | # self._image_downloader = image_downloader
39 | # self._article_file_path = article_path
40 | # self._md_conv = markdown.Markdown(extensions=[ImgExtExtension()])
41 | # self._replacement_mapping = {}
42 | #
43 | # def _read_article(self) -> List[str]:
44 | # with open(self._article_file_path, 'r') as m_file:
45 | # self._md_conv.convert(m_file.read())
46 | #
47 | # print(f'Images links count = {len(self._md_conv.images)}')
48 | # images = set(self._md_conv.images)
49 | # print(f'Unique images links count = {len(images)}')
50 | #
51 | # return images
52 | #
53 | # def _fix_document_urls(self) -> None:
54 | # print('Replacing images urls in the document...')
55 | # replacement_mapping = self._replacement_mapping
56 | # lines = []
57 | # with open(self._article_file_path, 'r') as infile:
58 | # for line in infile:
59 | # for src, target in replacement_mapping.items():
60 | # line = line.replace(src, target)
61 | # lines.append(line)
62 | #
63 | # with open(self._article_file_path, 'w') as outfile:
64 | # for line in lines:
65 | # outfile.write(line)
66 | #
67 | # def run(self):
68 | # """
69 | # Run article conversion.
70 | # """
71 | #
72 | # self._replacement_mapping = self._image_downloader.download_images(self._read_article())
73 | # self._fix_document_urls()
74 | #
75 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/mdparse/transformers/md/transformer.py:
--------------------------------------------------------------------------------
1 | """
2 | Images extractor from markdown document.
3 | """
4 |
5 | import markdown
6 | from markdown.treeprocessors import Treeprocessor
7 | from markdown.extensions import Extension
8 | from markdown.inlinepatterns import SimpleTagPattern
9 | from typing import List
10 |
11 |
12 | __all__ = ["ArticleTransformer"]
13 |
14 |
15 | class ImgExtractor(Treeprocessor):
16 | def run(self, doc):
17 | """
18 | Find all images and append to markdown.images.
19 | """
20 |
21 | self.md.images = []
22 | for image in doc.findall(".//img"):
23 | self.md.images.append(image.get("src"))
24 |
25 |
26 | class ImgExtExtension(Extension):
27 | def extendMarkdown(self, md, md_globals):
28 | img_ext = ImgExtractor(md)
29 | md.treeprocessors.register(img_ext, "imgext", 20)
30 |
31 |
32 | class ArticleTransformer:
33 | """
34 | Markdown article transformation class.
35 | """
36 |
37 | def __init__(self, article_path: str, image_downloader):
38 | self._image_downloader = image_downloader
39 | self._article_file_path = article_path
40 | self._md_conv = markdown.Markdown(extensions=[ImgExtExtension(), "md_in_html"])
41 | self._replacement_mapping = {}
42 |
43 | def _read_article(self) -> List[str]:
44 | with open(self._article_file_path, "r", encoding="utf8") as m_file:
45 | self._md_conv.convert(m_file.read())
46 |
47 | print(f"Images links count = {len(self._md_conv.images)}")
48 | images = set(self._md_conv.images)
49 | print(f"Unique images links count = {len(images)}")
50 |
51 | return images
52 |
53 | def _fix_document_urls(self) -> List[str]:
54 | # print('Replacing images urls in the document...')
55 | replacement_mapping = self._replacement_mapping
56 | lines = []
57 | with open(self._article_file_path, "r", encoding="utf8") as infile:
58 | for line in infile:
59 | for src, target in replacement_mapping.items():
60 | line = line.replace(src, target)
61 | lines.append(line)
62 |
63 | return lines
64 |
65 | def run(self):
66 | """
67 | Run article conversion.
68 | """
69 |
70 | self._replacement_mapping = self._image_downloader.download_images(
71 | self._read_article()
72 | )
73 | return self._fix_document_urls()
74 |
--------------------------------------------------------------------------------
/alfred/tests/cv_box_fancy.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | import cv2
25 | import numpy as np
26 |
27 | # ============================================================================
28 |
29 |
30 | def draw_border(img, pt1, pt2, color, thickness, r, d):
31 | x1, y1 = pt1
32 | x2, y2 = pt2
33 |
34 | # Top left
35 | cv2.line(img, (x1 + r, y1), (x1 + r + d, y1), color, thickness)
36 | cv2.line(img, (x1, y1 + r), (x1, y1 + r + d), color, thickness)
37 | cv2.ellipse(img, (x1 + r, y1 + r), (r, r), 180, 0, 90, color, thickness)
38 |
39 | # Top right
40 | cv2.line(img, (x2 - r, y1), (x2 - r - d, y1), color, thickness)
41 | cv2.line(img, (x2, y1 + r), (x2, y1 + r + d), color, thickness)
42 | cv2.ellipse(img, (x2 - r, y1 + r), (r, r), 270, 0, 90, color, thickness)
43 |
44 | # Bottom left
45 | cv2.line(img, (x1 + r, y2), (x1 + r + d, y2), color, thickness)
46 | cv2.line(img, (x1, y2 - r), (x1, y2 - r - d), color, thickness)
47 | cv2.ellipse(img, (x1 + r, y2 - r), (r, r), 90, 0, 90, color, thickness)
48 |
49 | # Bottom right
50 | cv2.line(img, (x2 - r, y2), (x2 - r - d, y2), color, thickness)
51 | cv2.line(img, (x2, y2 - r), (x2, y2 - r - d), color, thickness)
52 | cv2.ellipse(img, (x2 - r, y2 - r), (r, r), 0, 0, 90, color, thickness)
53 |
54 |
55 | # ============================================================================
56 |
57 |
58 | img = np.zeros((512, 512, 3), dtype=np.uint8)
59 |
60 | draw_border(img, (10, 10), (100, 100), (127, 255, 255), 2, 10, 20)
61 | draw_border(img, (128, 128), (240, 160), (255, 255, 127), 2, 10, 5)
62 | draw_border(img, (68, 68), (123, 289), (0, 255, 0), 1, 2, 5)
63 |
64 | cv2.imshow("round_rect.png", img)
65 | cv2.waitKey(0)
66 |
--------------------------------------------------------------------------------
/examples/alfred_show_box_gt.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | import os
25 |
26 | import sys
27 | import numpy as np
28 | from alfred.vis.image.common import get_unique_color_by_id
29 | from alfred.fusion.kitti_fusion import (
30 | LidarCamCalibData,
31 | load_pc_from_file,
32 | cam3d_to_pixel,
33 | lidar_pt_to_cam0_frame,
34 | )
35 | from alfred.fusion.common import (
36 | draw_3d_box,
37 | compute_3d_box_cam_coords,
38 | center_to_corner_3d,
39 | )
40 | import cv2
41 |
42 |
43 | img_f = os.path.join(os.path.dirname(os.path.abspath(__file__)), "./data/000011.png")
44 | v_f = os.path.join(os.path.dirname(os.path.abspath(__file__)), "./data/000011.bin")
45 | calib_f = os.path.join(os.path.dirname(os.path.abspath(__file__)), "./data/000011.txt")
46 | frame_calib = LidarCamCalibData(calib_f=calib_f)
47 |
48 | res = [
49 | [5.06, 1.43, 12.42, 1.90, 0.42, 1.04, 0.68],
50 | [-5.12, 1.85, 4.13, 1.50, 1.46, 3.70, 1.56],
51 | [-4.95, 1.83, 26.64, 1.86, 1.57, 3.83, 1.55],
52 | ]
53 |
54 | img = cv2.imread(img_f)
55 |
56 | for p in res:
57 | xyz = np.array([p[:3]])
58 |
59 | c2d = cam3d_to_pixel(xyz, frame_calib)
60 | if c2d is not None:
61 | cv2.circle(img, (int(c2d[0]), int(c2d[1])), 8, (0, 0, 255), -1)
62 |
63 | # hwl -> lwh
64 | lwh = np.array([p[3:6]])[:, [2, 1, 0]]
65 | r_y = p[6]
66 | pts3d = compute_3d_box_cam_coords(xyz[0], lwh[0], r_y)
67 |
68 | pts2d = []
69 | for pt in pts3d:
70 | coords = cam3d_to_pixel(pt, frame_calib)
71 | if coords is not None:
72 | pts2d.append(coords[:2])
73 | pts2d = np.array(pts2d)
74 | draw_3d_box(pts2d, img)
75 |
76 | cv2.imshow("rr", img)
77 | cv2.imwrite("result.png", img)
78 | cv2.waitKey(0)
79 |
--------------------------------------------------------------------------------
/alfred/modules/data/gather_voclabels.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | """
25 | 
26 | Gather all VOC class labels from an Annotations root folder
27 | which contains all the xml annotation files.
28 | 
29 | Prints every class name found, per-class box counts, and any
30 | xml files that contain no boxes.
31 | 
32 | """
33 | 
34 | import os
35 | import pickle
36 | import os.path
37 | import sys
38 | import numpy as np
39 |
40 | if sys.version_info[0] == 2:
41 | import xml.etree.cElementTree as ET
42 | else:
43 | import xml.etree.ElementTree as ET
44 | import glob
45 |
46 |
47 | def gather_labels(anno_dir):
48 | all_labels = glob.glob(os.path.join(anno_dir, "*.xml"))
49 | all_names = []
50 | all_obj_num = 0
51 | xmls_without_boxes = []
52 | i = 0
53 | cls_num_map = dict()
54 | for label in all_labels:
55 | if i % 500 == 0:
56 | print("parsing [{}/{}] {}".format(i, len(all_labels), label))
57 | i += 1
58 | root = ET.parse(label).getroot()
59 | one_sample_obj_num = 0
60 | for obj in root.iter("object"):
61 | one_sample_obj_num += 1
62 | name = obj.find("name").text
63 | if name in cls_num_map.keys():
64 | cls_num_map[name] += 1
65 | else:
66 |                 cls_num_map[name] = 1
67 | if name not in all_names:
68 | all_names.append(name)
69 | if one_sample_obj_num == 0:
70 | xmls_without_boxes.append(label)
71 | all_obj_num += one_sample_obj_num
72 | print("Done. summary...")
73 | print("all {} classes.".format(len(all_names)))
74 | print(all_names)
75 | # we also read xmls with empty boxes
76 | print("\nclass boxes statistic as: {}".format(cls_num_map))
77 | if len(xmls_without_boxes) > 0:
78 | print(
79 |             "\nalso, we found these files without any detections; you may want to remove them:"
80 | )
81 | print(xmls_without_boxes)
82 |
--------------------------------------------------------------------------------
/alfred/modules/data/split_coco.py:
--------------------------------------------------------------------------------
1 | """
2 |
3 | Split coco dataset
4 |
5 | """
6 | import json
7 | import argparse
8 | import funcy
9 | from sklearn.model_selection import train_test_split
10 | import os
11 |
12 |
13 | def save_coco(file, info, licenses, images, annotations, categories):
14 | with open(file, "wt", encoding="UTF-8") as coco:
15 | json.dump(
16 | {
17 | "info": info,
18 | "licenses": licenses,
19 | "images": images,
20 | "annotations": annotations,
21 | "categories": categories,
22 | },
23 | coco,
24 | indent=2,
25 | sort_keys=True,
26 | )
27 |
28 |
29 | def filter_annotations(annotations, images):
30 | image_ids = funcy.lmap(lambda i: int(i["id"]), images)
31 | return funcy.lfilter(lambda a: int(a["image_id"]) in image_ids, annotations)
32 |
33 |
34 | def split_coco(ann_f, split=0.8, remove_empty=False):
35 | train_f = os.path.join(
36 | os.path.dirname(ann_f), os.path.basename(ann_f).replace(".json", "_train.json")
37 | )
38 | val_f = os.path.join(
39 | os.path.dirname(ann_f), os.path.basename(ann_f).replace(".json", "_val.json")
40 | )
41 | with open(ann_f, "rt", encoding="UTF-8") as annotations:
42 | coco = json.load(annotations)
43 | info = ""
44 | if "info" in coco.keys():
45 | info = coco["info"]
46 | licenses = ""
47 | if "licenses" in coco.keys():
48 | licenses = coco["licenses"]
49 | images = coco["images"]
50 | annotations = coco["annotations"]
51 | categories = coco["categories"]
52 |
53 | number_of_images = len(images)
54 |
55 | if remove_empty:
56 | images_with_annotations = funcy.lmap(
57 | lambda a: int(a["image_id"]), annotations
58 | )
59 |
60 | # filter out images without annotations
61 | images = funcy.lremove(
62 | lambda i: i["id"] not in images_with_annotations, images
63 | )
64 | print(
65 | "removed {} images without annotations, all images: {}, now: {}".format(
66 | number_of_images - len(images), number_of_images, len(images)
67 | )
68 | )
69 | else:
70 | print("all images: {}".format(number_of_images))
71 |
72 | x, y = train_test_split(images, train_size=float(split))
73 |
74 | save_coco(
75 | train_f, info, licenses, x, filter_annotations(annotations, x), categories
76 | )
77 | save_coco(
78 | val_f, info, licenses, y, filter_annotations(annotations, y), categories
79 | )
80 |
81 | print(
82 | "Saved {} entries in {} and {} in {}.".format(
83 | len(x), train_f, len(y), val_f
84 | )
85 | )
86 |
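A usage sketch of split_coco above; the annotation path is an assumption for illustration:

    # writes instances_train.json and instances_val.json next to the original file
    split_coco("annotations/instances.json", split=0.9, remove_empty=True)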
--------------------------------------------------------------------------------
/alfred/vis/image/seg.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | """
25 | Draw semantic segmentation results
26 | (instance segmentation masks can be drawn the same way).
27 | 
28 | 
29 | """
30 | import numpy as np
31 | import cv2
32 | from .get_dataset_color_map import label_to_color_image
33 | from .get_dataset_color_map import _ADE20K, _CITYSCAPES, _MAPILLARY_VISTAS, _PASCAL
34 |
35 | from .mask import label2color_mask
36 |
37 |
38 | def vis_semantic_seg(
39 | img, seg, alpha=0.7, override_colormap=None, color_suite="cityscapes", is_show=False
40 | ):
41 | mask_color = label2color_mask(
42 | seg, override_id_clr_map=override_colormap, color_suit=color_suite
43 | )
44 | img_shape = img.shape
45 | mask_shape = mask_color.shape
46 | if img_shape != mask_shape:
47 | # resize mask to img shape
48 | mask_color = cv2.resize(mask_color, (img.shape[1], img.shape[0]))
49 |
50 | res = cv2.addWeighted(img, 0.5, mask_color, alpha, 0.4)
51 | if is_show:
52 | cv2.imshow("result", res)
53 | cv2.waitKey(0)
54 | return res, mask_color
55 |
56 |
57 | def draw_seg_by_dataset(img, seg, dataset, alpha=0.7, is_show=False, bgr_in=False):
58 | assert dataset in [
59 | _PASCAL,
60 | _CITYSCAPES,
61 | _MAPILLARY_VISTAS,
62 | _ADE20K,
63 | ], "dataset not support yet."
64 | img = np.asarray(img, dtype=np.uint8)
65 | if bgr_in:
66 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
67 |
68 | mask_color = np.asarray(label_to_color_image(seg, dataset), dtype=np.uint8)
69 | img_shape = img.shape
70 | mask_shape = mask_color.shape
71 | if img_shape != mask_shape:
72 | # resize mask to img shape
73 | mask_color = cv2.resize(mask_color, (img.shape[1], img.shape[0]))
74 |
75 | res = cv2.addWeighted(img, 0.3, mask_color, alpha, 0.6)
76 | if is_show:
77 | cv2.imshow("result", res)
78 | cv2.waitKey(0)
79 | return res, mask_color
80 |
--------------------------------------------------------------------------------
/alfred/modules/cabinet/split_txt.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | """
25 |
26 | split txt file with ratios
27 |
28 | alfred cab split -f all.txt -r 0.1,0.8,0.1 -n train,val,test
29 |
30 | """
31 | import os
32 | import glob
33 | from alfred.utils.log import logger as logging
34 | import numpy as np
35 |
36 |
37 | def split_txt_file(f, ratios, names):
38 | assert os.path.exists(f), "{} not exist.".format(f)
39 | if not ratios:
40 | ratios = [0.2, 0.8]
41 | else:
42 | ratios = [float(i) for i in ratios.split(",")]
43 | logging.info("split ratios: {}".format(ratios))
44 |
45 | if not names:
46 | names = ["part_{}".format(i) for i in range(len(ratios))]
47 | else:
48 | names = names.split(",")
49 | names = [i + ".txt" for i in names]
50 | logging.info("split save to names: {}".format(names))
51 |
52 | a = sum(ratios)
53 |     if abs(a - 1.0) > 1e-6:
54 | logging.info(
55 | "ratios: {} does not sum to 1. you must change it first.".format(ratios)
56 | )
57 | exit(1)
58 |
59 | # read txt file
60 | with open(f, "r") as f:
61 | lines = f.readlines()
62 | lines_no_empty = [i for i in lines if i != "" and i != "\n"]
63 | logging.info(
64 |         "file to split has {} lines in total; dropped {} empty lines.".format(
65 | len(lines), len(lines) - len(lines_no_empty)
66 | )
67 | )
68 | lines = lines_no_empty
69 | # split with ratios
70 | last_lines = 0
71 | for i, r in enumerate(ratios):
72 | one = lines[last_lines : last_lines + int(r * len(lines))]
73 | with open(names[i], "w") as ff:
74 | ff.writelines(one)
75 | logging.info(
76 | "Part {} saved into: {}. portion: {}/{}={}".format(
77 | i, names[i], len(one), len(lines), len(one) / (len(lines))
78 | )
79 | )
80 | last_lines += len(one)
81 | logging.info("split done.")
82 |
--------------------------------------------------------------------------------
/examples/demo_p3d.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | try:
4 | from pytorch3d.io import load_obj, save_obj
5 | from pytorch3d.structures import Meshes
6 | from pytorch3d.utils import ico_sphere
7 | from pytorch3d.ops import sample_points_from_meshes
8 | from pytorch3d.loss import (
9 | chamfer_distance,
10 | mesh_edge_loss,
11 | mesh_laplacian_smoothing,
12 | mesh_normal_consistency,
13 | )
14 | from alfred.vis.renders.render_p3d import Renderer
15 | except ImportError:
16 | from alfred.vis.renders.render_prd import Renderer
17 | import trimesh
18 | import colorsys
19 | import pyrender
20 | import cv2
21 | import numpy as np
22 |
23 | import os
24 | import torch
25 | import matplotlib.pyplot as plt
26 | from skimage.io import imread
27 |
28 | # Util function for loading meshes
29 | from pytorch3d.io import load_objs_as_meshes, load_obj
30 |
31 | # Data structures and functions for rendering
32 | from pytorch3d.structures import Meshes
33 | from pytorch3d.vis.plotly_vis import AxisArgs, plot_batch_individually, plot_scene
34 | from pytorch3d.vis.texture_vis import texturesuv_image_matplotlib
35 | from pytorch3d.renderer import (
36 | look_at_view_transform,
37 | FoVPerspectiveCameras,
38 | PerspectiveCameras,
39 | PointLights,
40 | DirectionalLights,
41 | Materials,
42 | RasterizationSettings,
43 | MeshRenderer,
44 | MeshRasterizer,
45 | SoftPhongShader,
46 | TexturesUV,
47 | TexturesVertex,
48 | )
49 |
50 | import numpy as np
51 | from alfred.dl.torch.common import device, print_shape
52 |
53 | # add path for demo utils functions
54 | import sys
55 | import os
56 |
57 |
58 | """
59 | wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.obj
60 | wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.mtl
61 | wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow_texture.png
62 | """
63 | device = torch.device("cpu")
64 |
65 | obj_filename = os.path.join("./data/cow_mesh", "cow.obj")
66 |
67 | # Load obj file
68 | mesh = load_objs_as_meshes([obj_filename], device=device)
69 |
70 | R, T = look_at_view_transform(2.7, 0, 180)
71 | print(R, T)
72 | # R = torch.eye(3).unsqueeze(0)
73 | # R = torch.randn([1, 3, 3])
74 | R = torch.Tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]]).to(device)
75 | T = torch.Tensor([[-0.0098, -0.1803, 7.9463]]).to(device)
76 | print_shape(R, T)
77 | sfm_camera = PerspectiveCameras(device=device, R=R, T=T, focal_length=1000 / 224)
78 | width = 224
79 | height = 224
80 | raster_settings = RasterizationSettings(
81 | image_size=(height, width),
82 | blur_radius=0.0,
83 | faces_per_pixel=1,
84 | )
85 | lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])
86 | sfm_renderer = MeshRenderer(
87 | rasterizer=MeshRasterizer(cameras=sfm_camera, raster_settings=raster_settings),
88 | shader=SoftPhongShader(device=device, cameras=sfm_camera, lights=lights),
89 | )
90 |
91 | print(mesh)
92 | images = sfm_renderer(mesh)
93 | cv2.imshow("aa", images[0, ..., :3].cpu().numpy())
94 | cv2.waitKey(0)
95 |
--------------------------------------------------------------------------------
/alfred/modules/vision/to_video.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 | from natsort import natsorted
5 | from colorama import Fore, init
6 |
7 | init(autoreset=True) # Auto-reset colorama after each print
8 |
9 | class VideoCombiner(object):
10 | def __init__(self, img_dir):
11 | self.img_dir = os.path.abspath(img_dir)
12 |
13 | if not os.path.exists(self.img_dir):
14 | print(Fore.RED + "=> Error: " + f"img_dir {self.img_dir} does not exist.")
15 | exit(1)
16 |
17 | self._get_video_shape()
18 |
19 | def _get_video_shape(self):
20 | # Filter and sort image files
21 | valid_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff'}
22 | all_files = os.listdir(self.img_dir)
23 |
24 | # Filter image files with valid extensions
25 | self.all_images = [
26 | os.path.join(self.img_dir, f)
27 | for f in all_files
28 | if os.path.splitext(f)[1].lower() in valid_extensions
29 | ]
30 |
31 | if not self.all_images:
32 | print(Fore.RED + "=> Error: " + f"No valid image files found in {self.img_dir}")
33 | exit(1)
34 |
35 | # Natural sort the images
36 | self.all_images = natsorted(self.all_images)
37 |
38 | # Get video shape from first image (more reliable than random)
39 | sample_img = self.all_images[0]
40 | img = cv2.imread(sample_img)
41 |
42 | if img is None:
43 | print(Fore.RED + "=> Error: " + f"Failed to read sample image {sample_img}")
44 | exit(1)
45 |
46 | self.video_shape = img.shape
47 |
48 | def combine(self, target_file="combined.mp4"):
49 | size = (self.video_shape[1], self.video_shape[0])
50 | print("=> Target video frame size:", size)
51 | print(f"=> Total {len(self.all_images)} frames to process")
52 |
53 | # Create video writer with correct parameters
54 | fourcc = cv2.VideoWriter_fourcc(*'mp4v') # Better compatibility than DIVX
55 | video_writer = cv2.VideoWriter(target_file, fourcc, 30, size)
56 |
57 | if not video_writer.isOpened():
58 | print(Fore.RED + "=> Error: " + "Failed to initialize video writer")
59 | exit(1)
60 |
61 | print("=> Processing frames...")
62 | for i, img_path in enumerate(self.all_images, 1):
63 | img = cv2.imread(img_path)
64 | if img is None:
65 | print(Fore.YELLOW + f"=> Warning: Skipped corrupted/invalid file {img_path}")
66 | continue
67 |
68 | # Ensure consistent frame size
69 | if img.shape != self.video_shape:
70 | img = cv2.resize(img, size)
71 |
72 | video_writer.write(img)
73 | if i % 100 == 0:
74 | print(f"=> Processed {i}/{len(self.all_images)} frames")
75 |
76 | video_writer.release()
77 | print(Fore.GREEN + f"=> Success: Video saved as {os.path.abspath(target_file)}")
78 |
79 | # Example usage
80 | if __name__ == "__main__":
81 | combiner = VideoCombiner("path/to/your/images")
82 | combiner.combine("output_video.mp4")
--------------------------------------------------------------------------------
/alfred/modules/data/coco2yolo.py:
--------------------------------------------------------------------------------
1 | """
2 | 
3 | 
4 | Convert COCO annotations to YOLO txt format,
5 | copying the images alongside the generated labels.
6 | 
7 | """
8 |
9 | import os
10 | import sys
11 |
12 | try:
13 | from pycocotools.coco import COCO
14 | from pycocotools import mask as maskUtils
15 | except ImportError as e:
16 | print("[WARN] coco2yolo need pycocotools installed.")
17 | # exit(-1)
18 | import numpy as np
19 | import matplotlib.pyplot as plt
20 | from matplotlib.collections import PatchCollection
21 | from matplotlib.patches import Polygon
22 | from alfred.utils.log import logger as logging
23 | import cv2
24 | from alfred.vis.image.det import visualize_det_cv2_part
25 | from alfred.vis.image.common import get_unique_color_by_id
26 | import shutil
27 |
28 |
29 | def convert(size, box):
30 | dw = 1.0 / (size[0])
31 | dh = 1.0 / (size[1])
32 | x = (box[0] + box[1]) / 2.0 - 1
33 | y = (box[2] + box[3]) / 2.0 - 1
34 | w = box[1] - box[0]
35 | h = box[3] - box[2]
36 | x = x * dw
37 | w = w * dw
38 | y = y * dh
39 | h = h * dh
40 | return (x, y, w, h)
41 |
42 |
43 | def coco2yolo(img_r, j_f):
44 | data_dir = img_r
45 | coco = COCO(j_f)
46 |
47 | cats = coco.loadCats(coco.getCatIds())
48 | logging.info("cats: {}".format(cats))
49 | print("cls list for yolo\n")
50 | for i in range(len(cats)):
51 | print(cats[i]["name"])
52 | print("\n")
53 | print("all {} categories.".format(len(cats)))
54 |
55 | img_ids = coco.getImgIds()
56 |
57 | target_txt_r = os.path.join(os.path.dirname(img_r), "yolo", "labels")
58 | target_img_r = os.path.join(os.path.dirname(img_r), "yolo", "images")
59 | os.makedirs(target_txt_r, exist_ok=True)
60 | os.makedirs(target_img_r, exist_ok=True)
61 |
62 |     print("converting, this may take a while...")
63 | for img_id in img_ids:
64 | img = coco.loadImgs(img_id)[0]
65 | # print('checking img: {}, id: {}'.format(img, img_id))
66 | # img['file_name'] may be not basename
67 | img_f = os.path.join(data_dir, os.path.basename(img["file_name"]))
68 | if not os.path.exists(img_f):
69 | # if not then pull it back to normal mode
70 | img_f = os.path.join(data_dir, img["file_name"])
71 | anno_ids = coco.getAnnIds(imgIds=img["id"])
72 | annos = coco.loadAnns(anno_ids)
73 |
74 | img_root, img_ext = os.path.splitext(img_f)
75 | out_file = open(
76 | os.path.join(target_txt_r, os.path.basename(img_root) + ".txt"), "w"
77 | )
78 | # out_file = open(os.path.join(target_txt_r, os.path.basename(img_f).split('.')[0] + '.txt'), 'w')
79 | img = cv2.imread(img_f)
80 | h, w, _ = img.shape
81 | shutil.copy(img_f, os.path.join(target_img_r, os.path.basename(img_f)))
82 | for ann in annos:
83 | b = ann["bbox"]
84 | x1 = int(b[0])
85 | y1 = int(b[1])
86 | x2 = int(x1 + b[2])
87 | y2 = int(y1 + b[3])
88 | cls_id = ann["category_id"]
89 | b = [x1, x2, y1, y2]
90 | bb = convert((w, h), b)
91 | out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + "\n")
92 | out_file.close()
93 | print("convert to yolo done!")
94 |
--------------------------------------------------------------------------------
/alfred/deprecated/dl/data/meta/dataset_mixin.py:
--------------------------------------------------------------------------------
1 | import numpy
2 | import six
3 |
4 | # code below is from Chainer
5 |
6 |
7 | class DatasetMixin(object):
8 |
9 | """Default implementation of dataset indexing.
10 |
11 | DatasetMixin provides the :meth:`__getitem__` operator. The default
12 | implementation uses :meth:`get_example` to extract each example, and
13 | combines the results into a list. This mixin makes it easy to implement a
14 | new dataset that does not support efficient slicing.
15 |
16 | Dataset implementation using DatasetMixin still has to provide the
17 | :meth:`__len__` operator explicitly.
18 |
19 | """
20 |
21 | def __getitem__(self, index):
22 | """Returns an example or a sequence of examples.
23 |
24 | It implements the standard Python indexing and one-dimensional integer
25 | array indexing. It uses the :meth:`get_example` method by default, but
26 | it may be overridden by the implementation to, for example, improve the
27 | slicing performance.
28 |
29 | Args:
30 | index (int, slice, list or numpy.ndarray): An index of an example
31 | or indexes of examples.
32 |
33 | Returns:
34 | If index is int, returns an example created by `get_example`.
35 | If index is either slice or one-dimensional list or numpy.ndarray,
36 | returns a list of examples created by `get_example`.
37 |
38 | .. admonition:: Example
39 |
40 | >>> import numpy
41 | >>> from chainer import dataset
42 | >>> class SimpleDataset(dataset.DatasetMixin):
43 | ... def __init__(self, values):
44 | ... self.values = values
45 | ... def __len__(self):
46 | ... return len(self.values)
47 | ... def get_example(self, i):
48 | ... return self.values[i]
49 | ...
50 | >>> ds = SimpleDataset([0, 1, 2, 3, 4, 5])
51 | >>> ds[1] # Access by int
52 | 1
53 | >>> ds[1:3] # Access by slice
54 | [1, 2]
55 | >>> ds[[4, 0]] # Access by one-dimensional integer list
56 | [4, 0]
57 | >>> index = numpy.arange(3)
58 | >>> ds[index] # Access by one-dimensional integer numpy.ndarray
59 | [0, 1, 2]
60 |
61 | """
62 | if isinstance(index, slice):
63 | current, stop, step = index.indices(len(self))
64 | return [self.get_example(i) for i in six.moves.range(current, stop, step)]
65 | elif isinstance(index, list) or isinstance(index, numpy.ndarray):
66 | return [self.get_example(i) for i in index]
67 | else:
68 | return self.get_example(index)
69 |
70 | def __len__(self):
71 | """Returns the number of data points."""
72 | raise NotImplementedError
73 |
74 | def get_example(self, i):
75 | """Returns the i-th example.
76 |
77 | Implementations should override it. It should raise :class:`IndexError`
78 | if the index is invalid.
79 |
80 | Args:
81 | i (int): The index of the example.
82 |
83 | Returns:
84 | The i-th example.
85 |
86 | """
87 | raise NotImplementedError
88 |
--------------------------------------------------------------------------------
/examples/pykitti_test.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | import pykitti
25 | import cv2
26 | import numpy as np
27 | from alfred.vis.pointcloud.pointcloud_vis import draw_pcs_open3d
28 |
29 |
30 | base_dir = "/media/jintain/sg/permanent/datasets/KITTI/videos"
31 | date = "2011_09_26"
32 | drive = "0051"
33 |
34 | data = pykitti.raw(base_dir, date, drive)
35 | print(data.cam2_files)
36 | cam2_img = data.get_rgb(3)[1]
37 | print(cam2_img)
38 | cv2.imshow("rr", np.array(cam2_img))
39 |
40 | # data.calib.T_cam0_velo.dot(point_velo)
41 | res = [
42 | [12.189727, 4.65575, -1.0090133, 1.6713146, 3.9860756, 1.4752198, 1.4311914],
43 | [7.0290184, 18.43234, -1.0616484, 1.5949062, 3.7942128, 1.4587526, 0.03434156],
44 | [9.716782, 18.663864, -1.081424, 1.6270422, 4.0220504, 1.428338, 0.010275014],
45 | [12.390503, 18.554394, -1.0709403, 1.5716408, 3.8583813, 1.4068353, 0.092568964],
46 | [9.162392, -3.2395134, -0.9900443, 0.48879692, 1.7805163, 1.780584, 4.7180395],
47 | [1.5449369, 19.820513, -1.1250883, 1.61444, 4.0291963, 1.4679328, 0.20142984],
48 | [15.010401, 17.861265, -0.61177015, 1.8016329, 4.52904, 1.9179995, -0.0009133518],
49 | [0.2915942, 14.302571, -1.6358033, 0.6031256, 1.7338636, 1.693197, 2.0567284],
50 | [32.58985, 16.622143, -0.9154575, 1.56024, 3.6420622, 1.4507264, 1.5841204],
51 | [10.96289, 33.31957, -1.8625767, 1.6718575, 4.1056437, 1.5355072, -0.5065325],
52 | [-20.711775, 12.870968, -1.3916719, 0.6494945, 0.6588189, 1.7635618, 2.878424],
53 | [-14.706663, 14.144306, -1.4347086, 0.5646943, 1.7102921, 1.7303042, 1.6427889],
54 | [-34.937218, -32.419926, -1.9705622, 2.0217955, 6.3850527, 2.5362377, 0.9260524],
55 | [-25.85193, 13.433075, -1.6172849, 0.5029159, 1.7657202, 1.6948656, 1.8433876],
56 | [-8.7119255, 15.603356, -0.861634, 0.61332655, 1.7866454, 1.7575798, -0.15929039],
57 | [0.44268692, -31.126797, -1.4658432, 0.6214817, 1.778398, 1.6685283, 2.7185097],
58 | [-1.3864591, 43.80352, -1.6687126, 1.990596, 5.726587, 2.5764484, 0.53529406],
59 | [-46.30665, -24.680546, -1.5553175, 0.54056036, 1.8155692, 1.7282323, 1.4364488],
60 | [-25.206638, 14.19597, -1.6388608, 0.60298264, 0.6539766, 1.7206633, 2.6259918],
61 | [42.099804, 16.609531, -0.95861834, 1.6101078, 3.805344, 1.5348499, 1.4423454],
62 | ]
63 |
64 | for p in res:
65 | pts3d_c = p[:3]
66 | cam0_xyz = data.calib.Tr
67 |
68 | cv2.waitKey(0)
69 |
--------------------------------------------------------------------------------
/alfred/vis/mesh3d/assets/sphere_vertices_8.txt:
--------------------------------------------------------------------------------
1 | 0.000 0.000 1.000
2 | 0.000 0.000 -1.000
3 | 0.383 0.000 0.924
4 | 0.354 0.146 0.924
5 | 0.271 0.271 0.924
6 | 0.146 0.354 0.924
7 | 0.000 0.383 0.924
8 | -0.146 0.354 0.924
9 | -0.271 0.271 0.924
10 | -0.354 0.146 0.924
11 | -0.383 0.000 0.924
12 | -0.354 -0.146 0.924
13 | -0.271 -0.271 0.924
14 | -0.146 -0.354 0.924
15 | -0.000 -0.383 0.924
16 | 0.146 -0.354 0.924
17 | 0.271 -0.271 0.924
18 | 0.354 -0.146 0.924
19 | 0.707 0.000 0.707
20 | 0.653 0.271 0.707
21 | 0.500 0.500 0.707
22 | 0.271 0.653 0.707
23 | 0.000 0.707 0.707
24 | -0.271 0.653 0.707
25 | -0.500 0.500 0.707
26 | -0.653 0.271 0.707
27 | -0.707 0.000 0.707
28 | -0.653 -0.271 0.707
29 | -0.500 -0.500 0.707
30 | -0.271 -0.653 0.707
31 | -0.000 -0.707 0.707
32 | 0.271 -0.653 0.707
33 | 0.500 -0.500 0.707
34 | 0.653 -0.271 0.707
35 | 0.924 0.000 0.383
36 | 0.854 0.354 0.383
37 | 0.653 0.653 0.383
38 | 0.354 0.854 0.383
39 | 0.000 0.924 0.383
40 | -0.354 0.854 0.383
41 | -0.653 0.653 0.383
42 | -0.854 0.354 0.383
43 | -0.924 0.000 0.383
44 | -0.854 -0.354 0.383
45 | -0.653 -0.653 0.383
46 | -0.354 -0.854 0.383
47 | -0.000 -0.924 0.383
48 | 0.354 -0.854 0.383
49 | 0.653 -0.653 0.383
50 | 0.854 -0.354 0.383
51 | 1.000 0.000 0.000
52 | 0.924 0.383 0.000
53 | 0.707 0.707 0.000
54 | 0.383 0.924 0.000
55 | 0.000 1.000 0.000
56 | -0.383 0.924 0.000
57 | -0.707 0.707 0.000
58 | -0.924 0.383 0.000
59 | -1.000 0.000 0.000
60 | -0.924 -0.383 0.000
61 | -0.707 -0.707 0.000
62 | -0.383 -0.924 0.000
63 | -0.000 -1.000 0.000
64 | 0.383 -0.924 0.000
65 | 0.707 -0.707 0.000
66 | 0.924 -0.383 0.000
67 | 0.924 0.000 -0.383
68 | 0.854 0.354 -0.383
69 | 0.653 0.653 -0.383
70 | 0.354 0.854 -0.383
71 | 0.000 0.924 -0.383
72 | -0.354 0.854 -0.383
73 | -0.653 0.653 -0.383
74 | -0.854 0.354 -0.383
75 | -0.924 0.000 -0.383
76 | -0.854 -0.354 -0.383
77 | -0.653 -0.653 -0.383
78 | -0.354 -0.854 -0.383
79 | -0.000 -0.924 -0.383
80 | 0.354 -0.854 -0.383
81 | 0.653 -0.653 -0.383
82 | 0.854 -0.354 -0.383
83 | 0.707 0.000 -0.707
84 | 0.653 0.271 -0.707
85 | 0.500 0.500 -0.707
86 | 0.271 0.653 -0.707
87 | 0.000 0.707 -0.707
88 | -0.271 0.653 -0.707
89 | -0.500 0.500 -0.707
90 | -0.653 0.271 -0.707
91 | -0.707 0.000 -0.707
92 | -0.653 -0.271 -0.707
93 | -0.500 -0.500 -0.707
94 | -0.271 -0.653 -0.707
95 | -0.000 -0.707 -0.707
96 | 0.271 -0.653 -0.707
97 | 0.500 -0.500 -0.707
98 | 0.653 -0.271 -0.707
99 | 0.383 0.000 -0.924
100 | 0.354 0.146 -0.924
101 | 0.271 0.271 -0.924
102 | 0.146 0.354 -0.924
103 | 0.000 0.383 -0.924
104 | -0.146 0.354 -0.924
105 | -0.271 0.271 -0.924
106 | -0.354 0.146 -0.924
107 | -0.383 0.000 -0.924
108 | -0.354 -0.146 -0.924
109 | -0.271 -0.271 -0.924
110 | -0.146 -0.354 -0.924
111 | -0.000 -0.383 -0.924
112 | 0.146 -0.354 -0.924
113 | 0.271 -0.271 -0.924
114 | 0.354 -0.146 -0.924
115 |
--------------------------------------------------------------------------------
/alfred/modules/data/view_yolo.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | """
25 |
26 | view YOLO labeled detection data (txt label files with normalized boxes)
27 |
28 |
29 | """
30 | import os
31 | import sys
32 | import cv2
33 | from glob import glob
34 |
35 |
36 |
37 | from alfred.utils.log import logger as logging
38 |
39 |
40 | def vis_det_yolo(img_root, label_root):
41 | logging.info("img root: {}, label root: {}".format(img_root, label_root))
42 | # auto detection .jpg or .png images
43 | txt_files = glob(os.path.join(label_root, "*.txt"))
44 | for txt_f in txt_files:
45 | txt_root, txt_ext = os.path.splitext(txt_f)
46 | img_f = os.path.join(img_root, os.path.basename(txt_root) + ".jpg")
47 | # img_f = os.path.join(img_root, os.path.basename(txt_f).split('.')[0] + '.jpg')
48 | if os.path.exists(img_f):
49 | img = cv2.imread(img_f)
50 | h, w, _ = img.shape
51 | if os.path.exists(txt_f):
52 | with open(txt_f) as f:
53 | annos = f.readlines()
54 | for ann in annos:
55 | ann = ann.strip().split(" ")
56 | category = ann[0]
57 | x = float(ann[1]) * w
58 | y = float(ann[2]) * h
59 | bw = float(ann[3]) * w
60 | bh = float(ann[4]) * h
61 | xmin = int(x - bw / 2)
62 | ymin = int(y - bh / 2)
63 | xmax = int(x + bw / 2)
64 | ymax = int(y + bh / 2)
65 | print(xmin, ymin, xmax, ymax, category)
66 | cv2.putText(
67 | img,
68 | category,
69 | (xmin, ymin),
70 | cv2.FONT_HERSHEY_COMPLEX,
71 | 0.7,
72 | (255, 255, 255),
73 | )
74 | cv2.rectangle(
75 | img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2, 1
76 | )
77 | cv2.imshow("yolo check", img)
78 | ch = cv2.waitKey(0)
79 | if ch == 27:
80 | exit()
81 | else:
82 |             logging.warning("image: {} not found.".format(img_f))
83 |
84 |
85 | if __name__ == "__main__":
86 |     vis_det_yolo(sys.argv[1], sys.argv[2])
87 |
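For reference, a minimal sketch of the center-to-corner math vis_det_yolo applies to each YOLO label line; the label values and image size below are made up:

# a YOLO label line stores: class cx cy w h, all normalized to [0, 1]
line = "0 0.5 0.5 0.25 0.4"
img_w, img_h = 1280, 720
cls, cx, cy, bw, bh = line.split(" ")
x, y = float(cx) * img_w, float(cy) * img_h      # box center in pixels
bw, bh = float(bw) * img_w, float(bh) * img_h    # box size in pixels
xmin, ymin = int(x - bw / 2), int(y - bh / 2)
xmax, ymax = int(x + bw / 2), int(y + bh / 2)
print(cls, xmin, ymin, xmax, ymax)               # 0 480 216 800 504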
--------------------------------------------------------------------------------
/alfred/modules/vision/video_extractor.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (c) 2020 JinTian.
4 | #
5 | # This file is part of alfred
6 | # (see http://jinfagang.github.io).
7 | #
8 | # Licensed to the Apache Software Foundation (ASF) under one
9 | # or more contributor license agreements. See the NOTICE file
10 | # distributed with this work for additional information
11 | # regarding copyright ownership. The ASF licenses this file
12 | # to you under the Apache License, Version 2.0 (the
13 | # "License"); you may not use this file except in compliance
14 | # with the License. You may obtain a copy of the License at
15 | #
16 | # http://www.apache.org/licenses/LICENSE-2.0
17 | #
18 | # Unless required by applicable law or agreed to in writing,
19 | # software distributed under the License is distributed on an
20 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
21 | # KIND, either express or implied. See the License for the
22 | # specific language governing permissions and limitations
23 | # under the License.
24 | #
25 | import os
26 | import sys
27 |
28 | try:
29 | import cv2
30 | except ImportError:
31 | print(
32 |         "opencv-python is not installed; install it with: pip install opencv-python"
33 | )
34 | from colorama import Fore, Back, Style
35 |
36 |
37 | class VideoExtractor(object):
38 | def __init__(self, jump_frames=6, save_format="frame_%06d.jpg"):
39 | """
40 |         jump_frames controls how often a frame is kept, e.g. jump_frames=6
41 |         saves one frame out of every 6 read from the video
42 |         :param jump_frames: keep every jump_frames-th frame
43 |         :param save_format: filename pattern for the saved frames,
44 |         e.g. frame_000004.jpg with the default "frame_%06d.jpg"
45 | """
46 | self.current_frame = 0
47 | self.current_save_frame = 0
48 | if jump_frames:
49 | self.jump_frames = int(jump_frames)
50 | else:
51 | self.jump_frames = 6
52 | self.save_format = save_format
53 |
54 | def extract(self, video_f):
55 | if os.path.exists(video_f) and os.path.isfile(video_f):
56 | cap = cv2.VideoCapture(video_f)
57 | f_n = os.path.basename(video_f).split(".")[0]
58 |
59 | save_dir = os.path.join(
60 | os.path.dirname(video_f), os.path.basename(video_f).split(".")[0]
61 | )
62 | if not os.path.exists(save_dir):
63 | os.makedirs(save_dir)
64 |
65 | res = True
66 | while res:
67 | res, image = cap.read()
68 | self.current_frame += 1
69 |                 if res and self.current_frame % self.jump_frames == 0:
70 | print(
71 | "Read frame: {} jump frames: {}".format(
72 | self.current_frame, self.jump_frames
73 | )
74 | )
75 | cv2.imwrite(
76 | os.path.join(
77 | save_dir,
78 | f_n + "_" + self.save_format % self.current_save_frame,
79 | ),
80 | image,
81 | )
82 | self.current_save_frame += 1
83 |
84 | print(Fore.GREEN + Style.BRIGHT)
85 | print("Success!")
86 | else:
87 | print(Fore.RED + Style.BRIGHT)
88 |             print("Error! " + Style.RESET_ALL + "{} does not exist.".format(video_f))
89 |
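A minimal usage sketch (the video path is hypothetical): one frame out of every six is written into a folder named after the video, next to it:

from alfred.modules.vision.video_extractor import VideoExtractor

extractor = VideoExtractor(jump_frames=6, save_format="frame_%06d.jpg")
extractor.extract("/path/to/some_video.mp4")  # frames land in /path/to/some_video/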
--------------------------------------------------------------------------------
/alfred/deprecated/dl/torch/common.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | """
25 | Common PyTorch utilities
26 |
27 | helpers that are frequently needed when writing torch applications
28 |
29 | """
30 | import itertools
31 | import inspect
32 |
33 | from colorama import Fore, Back, Style
34 | import numpy as np
35 |
36 | try:
37 | import torch
38 |     torch_installed = True
39 | device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
40 | except ImportError:
41 | device = None
42 | torch_installed = False
43 |
44 |
45 | def print_tensor(t, label=None, ignore_value=True):
46 | if isinstance(t, torch.Tensor):
47 | if label:
48 | print(Fore.YELLOW + Style.BRIGHT + "-> {}".format(label) + Style.RESET_ALL)
49 | else:
50 | print(Fore.YELLOW + Style.BRIGHT + "tensor: " + Style.RESET_ALL)
51 | if ignore_value:
52 | print("shape: {}\ndtype: {} {}\n".format(t.shape, t.dtype, t.device))
53 | else:
54 | print("value: {}\nshape: {}\ndtype: {}\n".format(t, t.shape, t.dtype))
55 |
56 | else:
57 | print("{} is not a tensor.".format(t))
58 |
59 |
60 | def decorator(f):
61 | def wrapper(*args, **kwargs):
62 | bound_args = inspect.signature(f).bind(*args, **kwargs)
63 | bound_args.apply_defaults()
64 |
65 | frame = inspect.currentframe()
66 | frame = inspect.getouterframes(frame)[1]
67 | string = inspect.getframeinfo(frame[0]).code_context[0].strip()
68 | args_ori_names = string[string.find("(") + 1 : -1].split(",")
69 |
70 | names = []
71 | for i in args_ori_names:
72 | if i.find("=") != -1:
73 | names.append(i.split("=")[1].strip())
74 | else:
75 | names.append(i)
76 | args_dict = dict(zip(names, args))
77 | for k, v in args_dict.items():
78 | k = k.strip()
79 | if isinstance(v, torch.Tensor):
80 | print(f"[{k}]: ", v.shape, v.device, v.dtype)
81 | else:
82 | print(f"[{k}]: ", v.shape)
83 | return f(*args, **kwargs)
84 |
85 | return wrapper
86 |
87 |
88 | @decorator
89 | def print_shape(*vs):
90 | pass
91 |
92 |
93 | if __name__ == "__main__":
94 | cam = torch.randn([4, 5, 300])
95 | pose = torch.randn([1, 44, 55])
96 | # print_shape(locals(), cam, pose)
97 | print_shape(cam, pose)
98 |
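The __main__ block above only exercises print_shape; a small sketch of print_tensor, reusing this module's torch import:

t = torch.randn(2, 3)
print_tensor(t, label="features")                      # prints shape, dtype, device
print_tensor(t, label="features", ignore_value=False)  # prints the values as well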
--------------------------------------------------------------------------------
/alfred/modules/vision/video_reducer.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Copyright (c) 2020 JinTian.
4 | #
5 | # This file is part of alfred
6 | # (see http://jinfagang.github.io).
7 | #
8 | # Licensed to the Apache Software Foundation (ASF) under one
9 | # or more contributor license agreements. See the NOTICE file
10 | # distributed with this work for additional information
11 | # regarding copyright ownership. The ASF licenses this file
12 | # to you under the Apache License, Version 2.0 (the
13 | # "License"); you may not use this file except in compliance
14 | # with the License. You may obtain a copy of the License at
15 | #
16 | # http://www.apache.org/licenses/LICENSE-2.0
17 | #
18 | # Unless required by applicable law or agreed to in writing,
19 | # software distributed under the License is distributed on an
20 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
21 | # KIND, either express or implied. See the License for the
22 | # specific language governing permissions and limitations
23 | # under the License.
24 | #
25 | import os
26 | import sys
27 | import cv2
28 | from colorama import Fore, Back, Style
29 | from alfred.utils.log import logger as logging
30 |
31 |
32 | class VideoReducer(object):
33 | def __init__(self, jump_frames=6):
34 | """
35 |         jump_frames controls how often a frame is kept, e.g. jump_frames=6
36 |         keeps one frame out of every 6 read from the video
37 |         (the first frames are always kept, see act())
38 |         :param jump_frames: keep every jump_frames-th frame
39 |
40 | """
41 | self.current_frame = 0
42 | self.current_save_frame = 0
43 | if jump_frames:
44 | self.jump_frames = int(jump_frames)
45 | else:
46 | self.jump_frames = 6
47 |
48 | def act(self, video_f):
49 | """
50 | reduce the video frame by drop frames
51 |
52 | """
53 | if os.path.exists(video_f) and os.path.isfile(video_f):
54 | logging.info("start to reduce file: {}".format(video_f))
55 | cap = cv2.VideoCapture(video_f)
56 | target_f = os.path.join(
57 | os.path.dirname(video_f),
58 | os.path.basename(video_f).split(".")[0] + "_reduced.mp4",
59 | )
60 | size = (int(cap.get(3)), int(cap.get(4)))
61 | logging.info(
62 | "video size: {}, will support reduce size in the future.".format(size)
63 | )
64 | video_writer = cv2.VideoWriter(
65 | target_f, cv2.VideoWriter_fourcc(*"DIVX"), 24, size
66 | )
67 | res = True
68 | while res:
69 | res, image = cap.read()
70 | self.current_frame += 1
71 |                 if res and (
72 |                     self.current_frame % self.jump_frames == 0
73 |                     or self.current_frame < 15):
74 | print(
75 | "Read frame: {} jump frames: {}".format(
76 | self.current_frame, self.jump_frames
77 | )
78 | )
79 | self.current_save_frame += 1
80 | video_writer.write(image)
81 | video_writer.release()
82 | logging.info("reduced video file has been saved into: {}".format(target_f))
83 | else:
84 | print(Fore.RED + Style.BRIGHT)
85 |             print("Error! " + Style.RESET_ALL + "{} does not exist.".format(video_f))
86 |
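A minimal usage sketch (the video path is hypothetical); the reduced clip is written next to the input with a _reduced suffix:

from alfred.modules.vision.video_reducer import VideoReducer

reducer = VideoReducer(jump_frames=6)
reducer.act("/path/to/some_video.mp4")  # writes /path/to/some_video_reduced.mp4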
--------------------------------------------------------------------------------
/alfred/deprecated/dl/data/meta/getter_dataset.py:
--------------------------------------------------------------------------------
1 | from .sliceable_dataset import _as_key_indices
2 | from .sliceable_dataset import _is_iterable
3 | from .sliceable_dataset import SliceableDataset
4 |
5 |
6 | class GetterDataset(SliceableDataset):
7 | """A sliceable dataset class that is defined with getters.
8 |
9 | This is a dataset class with getters.
10 | Please refer to the tutorial for more detailed explanation.
11 |
12 | Here is an example.
13 |
14 | >>> class SliceableLabeledImageDataset(GetterDataset):
15 | >>> def __init__(self, pairs, root='.'):
16 | >>> super(SliceableLabeledImageDataset, self).__init__()
17 | >>> with open(pairs) as f:
18 | >>> self._pairs = [l.split() for l in f]
19 | >>> self._root = root
20 | >>>
21 | >>> self.add_getter('img', self.get_image)
22 | >>> self.add_getter('label', self.get_label)
23 | >>>
24 | >>> def __len__(self):
25 | >>> return len(self._pairs)
26 | >>>
27 | >>> def get_image(self, i):
28 | >>> path, _ = self._pairs[i]
29 | >>> return read_image(os.path.join(self._root, path))
30 | >>>
31 | >>> def get_label(self, i):
32 | >>> _, label = self._pairs[i]
33 | >>> return np.int32(label)
34 | >>>
35 | >>> dataset = SliceableLabeledImageDataset('list.txt')
36 | >>>
37 | >>> # get a subset with label = 0, 1, 2
38 | >>> # no images are loaded
39 | >>> indices = [i for i, label in
40 | ... enumerate(dataset.slice[:, 'label']) if label in {0, 1, 2}]
41 | >>> dataset_012 = dataset.slice[indices]
42 | """
43 |
44 | def __init__(self):
45 | self._keys = []
46 | self._getters = []
47 | self._return_tuple = True
48 |
49 | def __len__(self):
50 | raise NotImplementedError
51 |
52 | @property
53 | def keys(self):
54 | if self._return_tuple:
55 | return tuple(key for key, _, _ in self._keys)
56 | else:
57 | return self._keys[0][0]
58 |
59 | @keys.setter
60 | def keys(self, keys):
61 | self._keys = [
62 | self._keys[key_index] for key_index in _as_key_indices(keys, self.keys)
63 | ]
64 | self._return_tuple = _is_iterable(keys)
65 |
66 | def add_getter(self, keys, getter):
67 | """Register a getter function
68 |
69 | Args:
70 | keys (string or tuple of strings): The name(s) of data
71 | that the getter function returns.
72 | getter (callable): A getter function that takes an index and
73 | returns data of the corresponding example.
74 | """
75 | self._getters.append(getter)
76 | if _is_iterable(keys):
77 | for key_index, key in enumerate(keys):
78 | self._keys.append((key, len(self._getters) - 1, key_index))
79 | else:
80 | self._keys.append((keys, len(self._getters) - 1, None))
81 |
82 | def get_example_by_keys(self, index, key_indices):
83 | example = []
84 | cache = {}
85 | for key_index in key_indices:
86 | _, getter_index, key_index = self._keys[key_index]
87 | if getter_index not in cache:
88 | cache[getter_index] = self._getters[getter_index](index)
89 | if key_index is None:
90 | example.append(cache[getter_index])
91 | else:
92 | example.append(cache[getter_index][key_index])
93 | return tuple(example)
94 |
--------------------------------------------------------------------------------
/alfred/modules/vision/combine_img_column.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import argparse
4 | import cv2
5 | import numpy as np
6 | from natsort import natsorted
7 |
8 |
9 | def parse_args():
10 | parser = argparse.ArgumentParser(
11 | description="Concatenate images in a specified shape."
12 | )
13 | parser.add_argument(
14 | "-d", "--dir", required=True, help="Directory containing images"
15 | )
16 | parser.add_argument(
17 | "-s",
18 | "--shape",
19 | required=True,
20 | help="Shape to concatenate images, e.g., 3x3 or 3xN or Nx5",
21 | )
22 | parser.add_argument(
23 | "-p",
24 | "--padding",
25 | type=int,
26 | default=0,
27 | help="Padding size to add around each image",
28 | )
29 | return parser.parse_args()
30 |
31 |
32 | def load_images(image_dir):
33 | image_files = [
34 | f
35 | for f in os.listdir(image_dir)
36 | if f.lower().endswith((".png", ".jpg", ".jpeg", ".bmp", ".gif"))
37 | ]
38 | image_files = natsorted(image_files)
39 | images = [cv2.imread(os.path.join(image_dir, f)) for f in image_files]
40 | return images
41 |
42 |
43 | def create_black_image(shape, dtype=np.uint8):
44 | return np.zeros(shape, dtype=dtype)
45 |
46 |
47 | def concat_images(images, shape, padding):
48 | if "xN" in shape:
49 | rows = int(shape.split("x")[0])
50 | cols = (len(images) + rows - 1) // rows # Calculate columns needed
51 | elif "Nx" in shape:
52 | cols = int(shape.split("x")[1])
53 | rows = (len(images) + cols - 1) // cols # Calculate rows needed
54 | else:
55 | rows, cols = map(int, shape.split("x"))
56 |
57 | max_height = max(image.shape[0] for image in images) + 2 * padding
58 | max_width = max(image.shape[1] for image in images) + 2 * padding
59 | channels = images[0].shape[2] if len(images[0].shape) == 3 else 1
60 |
61 | white_image = create_black_image((max_height, max_width, channels)) + 255
62 |
63 | grid = []
64 | for r in range(rows):
65 | row_images = []
66 | for c in range(cols):
67 | idx = r * cols + c
68 | if idx < len(images):
69 | img = images[idx]
70 | img_padded = cv2.copyMakeBorder(
71 | img,
72 | padding,
73 | padding,
74 | padding,
75 | padding,
76 | cv2.BORDER_CONSTANT,
77 | value=[255, 255, 255],
78 | )
79 | img_padded = cv2.copyMakeBorder(
80 | img_padded,
81 | 0,
82 | max_height - img_padded.shape[0],
83 | 0,
84 | max_width - img_padded.shape[1],
85 | cv2.BORDER_CONSTANT,
86 | value=[0, 0, 0],
87 | )
88 | else:
89 | img_padded = white_image
90 | row_images.append(img_padded)
91 | grid.append(np.hstack(row_images))
92 | return np.vstack(grid)
93 |
94 |
95 | def main():
96 | args = parse_args()
97 | images = load_images(args.dir)
98 | concatenated_image = concat_images(images, args.shape, args.padding)
99 | cv2.imwrite("concatenated_image.jpg", concatenated_image)
100 | print("Concatenated image saved as 'concatenated_image.jpg'")
101 |
102 |
103 | # if __name__ == "__main__":
104 | # main()
105 |
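Since the __main__ entry above is commented out, a minimal direct-call sketch (the image directory is hypothetical):

from alfred.modules.vision.combine_img_column import load_images, concat_images
import cv2

imgs = load_images("/path/to/images")         # natsorted jpg/png/... files
grid = concat_images(imgs, "Nx3", padding=5)  # 3 columns, as many rows as needed
cv2.imwrite("grid.jpg", grid)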
--------------------------------------------------------------------------------
/alfred/deploy/tensorrt/process.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import numpy as np
15 | from PIL import Image
16 | import cv2
17 | import torch
18 | from torch import nn
19 | import torchvision.transforms as T
20 |
21 | import pycuda.driver as cuda
22 | import pycuda.autoinit
23 |
24 | try:
25 | import cupy as cp
26 | except ImportError:
27 | print(
28 |         "cupy is not installed; CUDA preprocessing needs it: https://docs.cupy.dev/en/stable/install.html"
29 | )
30 | # from cupy.core.dlpack import toDlpack
31 | # from cupy.core.dlpack import fromDlpack
32 | from torch.utils.dlpack import to_dlpack
33 | from torch.utils.dlpack import from_dlpack
34 | from alfred.dl.torch.common import device
35 |
36 |
37 | def preprocess_np(img_path):
38 | """process use numpy"""
39 | im = Image.open(img_path)
40 | img = im.resize((800, 800), Image.BILINEAR)
41 | img = np.array(img).astype(np.float32) / 255.0
42 | img = img.transpose(2, 0, 1)
43 | # print(img.shape)
44 | img = (img - np.array([[[0.485]], [[0.456]], [[0.406]]])) / np.array(
45 | [[[0.229]], [[0.224]], [[0.225]]]
46 | )
47 |
48 | # img = img.transpose(1,2,0)
49 | img = np.expand_dims(img, axis=0)
50 | img = np.ascontiguousarray(img)
51 | img = np.array(img).astype(np.float32)
52 |
53 | return img, im, im.size
54 |
55 |
56 | class PyTorchTensorHolder(pycuda.driver.PointerHolderBase):
57 |     """Code adapted from:
58 | https://github.com/NVIDIA/trt-samples-for-hackathon-cn/blob/master/python/app_onnx_resnet50.py
59 | """
60 |
61 | def __init__(self, tensor):
62 | super(PyTorchTensorHolder, self).__init__()
63 | self.tensor = tensor
64 |
65 | def get_pointer(self):
66 | return self.tensor.data_ptr()
67 |
68 |
69 | transform = T.Compose(
70 | [
71 | T.Resize((800, 800)), # PIL.Image.BILINEAR
72 | T.ToTensor(),
73 | # T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
74 | ]
75 | )
76 |
77 |
78 | def preprocess_torch(img_path):
79 | """process use torchvision"""
80 | im = Image.open(img_path)
81 | img = transform(im).unsqueeze(0)
82 | img = PyTorchTensorHolder(img)
83 | return img, im, im.size
84 |
85 |
86 | def preprocess_torch_v1(img_path):
87 | im = Image.open(img_path)
88 | img = transform(im).unsqueeze(0).cpu().numpy()
89 | return img, im, im.size
90 |
91 |
92 | def preprocess_np_no_normalize(img_path):
93 | im = cv2.imread(img_path)
94 | print(img_path)
95 | print(im.shape)
96 | # img = transform(im).unsqueeze(0)
97 | a = np.transpose(im, (2, 0, 1)).astype(np.float32)
98 | return a, im
99 |
100 |
101 | def preprocess_cu(img_np):
102 | mean_cp = cp.array([[[0.485]], [[0.456]], [[0.406]]])
103 | std_cp = cp.array([[[0.229]], [[0.224]], [[0.225]]])
104 |
105 |     img_cu = cp.divide(cp.asarray(img_np, dtype=cp.float32), 255.0)
106 | img_cu = img_cu.transpose(2, 0, 1)
107 | img_cu = cp.subtract(img_cu, mean_cp)
108 | img_cu = cp.divide(img_cu, std_cp)
109 |
110 | # cupy to torch tensor
111 | # img_tensor = from_dlpack(toDlpack(img_cu))
112 |
113 | return img_cu
114 |
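A minimal sketch of the cupy path, following the commented-out dlpack lines above; it reuses this module's imports and assumes cupy is installed and that test.jpg is some image on disk:

img_bgr = cv2.imread("test.jpg")             # HWC uint8 image
img_cu = preprocess_cu(img_bgr)              # normalized CHW array living on the GPU
img_tensor = from_dlpack(img_cu.toDlpack())  # zero-copy handoff from cupy to torch
print(img_tensor.shape, img_tensor.device)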
--------------------------------------------------------------------------------
/alfred/deprecated/dl/torch/env.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 | import importlib
3 | import importlib.util
4 | import logging
5 | import os
6 | import random
7 | import sys
8 | from datetime import datetime
9 |
10 | import numpy as np
11 | import torch
12 |
13 | __all__ = ["seed_all_rng"]
14 |
15 |
16 | def seed_all_rng(seed=None):
17 | """
18 | Set the random seed for the RNG in torch, numpy and python.
19 |
20 | Args:
21 | seed (int): if None, will use a strong random seed.
22 | """
23 | if seed is None:
24 | seed = (
25 | os.getpid()
26 | + int(datetime.now().strftime("%S%f"))
27 | + int.from_bytes(os.urandom(2), "big")
28 | )
29 | logger = logging.getLogger(__name__)
30 | logger.info("Using a generated random seed {}".format(seed))
31 | np.random.seed(seed)
32 | torch.set_rng_state(torch.manual_seed(seed).get_state())
33 | random.seed(seed)
34 |
35 |
36 | # from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
37 | def _import_file(module_name, file_path, make_importable=False):
38 | spec = importlib.util.spec_from_file_location(module_name, file_path)
39 | module = importlib.util.module_from_spec(spec)
40 | spec.loader.exec_module(module)
41 | if make_importable:
42 | sys.modules[module_name] = module
43 | return module
44 |
45 |
46 | def _configure_libraries():
47 | """
48 | Configurations for some libraries.
49 | """
50 | # An environment option to disable `import cv2` globally,
51 | # in case it leads to negative performance impact
52 | disable_cv2 = int(os.environ.get("dl_lib_DISABLE_CV2", False))
53 | if disable_cv2:
54 | sys.modules["cv2"] = None
55 | else:
56 | # Disable opencl in opencv since its interaction with cuda often has negative effects
57 | # This envvar is supported after OpenCV 3.4.0
58 | os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled"
59 | try:
60 | import cv2
61 |
62 | if int(cv2.__version__.split(".")[0]) >= 3:
63 | cv2.ocl.setUseOpenCL(False)
64 | except ImportError:
65 | pass
66 |
67 |
68 | _ENV_SETUP_DONE = False
69 |
70 |
71 | def setup_environment():
72 | """Perform environment setup work. The default setup is a no-op, but this
73 | function allows the user to specify a Python source file or a module in
74 | the $dl_lib_ENV_MODULE environment variable, that performs
75 | custom setup work that may be necessary to their computing environment.
76 | """
77 | global _ENV_SETUP_DONE
78 | if _ENV_SETUP_DONE:
79 | return
80 | _ENV_SETUP_DONE = True
81 |
82 | _configure_libraries()
83 |
84 | custom_module_path = os.environ.get("dl_lib_ENV_MODULE")
85 |
86 | if custom_module_path:
87 | setup_custom_environment(custom_module_path)
88 | else:
89 | # The default setup is a no-op
90 | pass
91 |
92 |
93 | def setup_custom_environment(custom_module):
94 | """
95 | Load custom environment setup by importing a Python source file or a
96 | module, and run the setup function.
97 | """
98 | if custom_module.endswith(".py"):
99 | module = _import_file("dl_lib.utils.env.custom_module", custom_module)
100 | else:
101 | module = importlib.import_module(custom_module)
102 | assert hasattr(module, "setup_environment") and callable(
103 | module.setup_environment
104 | ), (
105 | "Custom environment module defined in {} does not have the "
106 | "required callable attribute 'setup_environment'."
107 | ).format(
108 | custom_module
109 | )
110 | module.setup_environment()
111 |
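A minimal sketch of what seed_all_rng guarantees, reusing the imports and function defined above: reseeding with the same value makes the torch, numpy and python draws repeat exactly:

seed_all_rng(42)
a = (torch.rand(2), np.random.rand(2), random.random())
seed_all_rng(42)
b = (torch.rand(2), np.random.rand(2), random.random())
assert torch.equal(a[0], b[0]) and (a[1] == b[1]).all() and a[2] == b[2]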
--------------------------------------------------------------------------------
/alfred/modules/data/convert_csv2voc.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | """
25 | Convert a CSV labeling dataset to VOC format
26 |
27 | CARDS_COURTYARD_B_T_frame_0011.jpg,1280,720,yourleft,647,453,824,551
28 | CARDS_COURTYARD_B_T_frame_0011.jpg,1280,720,yourright,515,431,622,543
29 |
30 | assuming the images and the csv files are under the same folder.
31 |
32 | """
33 | import os
34 | import sys
35 | import glob
36 | import numpy as np
37 | from PIL import Image
38 |
39 | try:
40 |     from lxml.etree import Element, SubElement, tostring, ElementTree
41 | except ImportError:
42 | pass
43 |
44 |
45 | def convert_one_csv_to_xml(csv_f, img_f):
46 | if os.path.exists(csv_f):
47 | csv_anno = np.loadtxt(csv_f)
48 | if len(csv_anno.shape) < 2 and csv_anno.shape[0] != 0:
49 | csv_anno = np.expand_dims(csv_anno, axis=0)
50 | target_path = os.path.join(
51 | os.path.dirname(csv_f), os.path.basename(csv_f).split(".")[0] + ".xml"
52 | )
53 | # convert xml
54 | if os.path.exists(img_f):
55 | im = Image.open(img_f)
56 | width = im.size[0]
57 | height = im.size[1]
58 | node_root = Element("annotation")
59 | node_folder = SubElement(node_root, "folder")
60 | node_folder.text = "images"
61 | node_filename = SubElement(node_root, "filename")
62 | node_filename.text = os.path.basename(img_f)
63 | node_size = SubElement(node_root, "size")
64 | node_width = SubElement(node_size, "width")
65 | node_width.text = str(width)
66 | node_height = SubElement(node_size, "height")
67 | node_height.text = str(height)
68 | node_depth = SubElement(node_size, "depth")
69 | node_depth.text = "3"
70 |
71 | for item in csv_anno:
72 | node_object = SubElement(node_root, "object")
73 | node_name = SubElement(node_object, "name")
74 | node_name.text = label_map[item[0]]
75 | node_difficult = SubElement(node_object, "difficult")
76 | node_difficult.text = "0"
77 | node_bndbox = SubElement(node_object, "bndbox")
78 | node_xmin = SubElement(node_bndbox, "xmin")
79 | node_xmin.text = str(int(item[1] * width))
80 | node_ymin = SubElement(node_bndbox, "ymin")
81 |                 node_ymin.text = str(int(item[2] * height))  # item assumed [cls, xmin, ymin, xmax, ymax], normalized
82 |                 node_xmax = SubElement(node_bndbox, "xmax")
83 |                 node_xmax.text = str(int(item[3] * width))
84 |                 node_ymax = SubElement(node_bndbox, "ymax")
85 |                 node_ymax.text = str(int(item[4] * height))
86 | f = open(target_path, "wb")
87 | f.write(tostring(node_root, pretty_print=True))
88 | f.close()
89 | else:
90 | print("image: {} not exist.".format(img_f))
91 | else:
92 | print("!! {} not exist.".format(csv_f))
93 |
--------------------------------------------------------------------------------
/alfred/modules/data/txt2voc.py:
--------------------------------------------------------------------------------
1 | # Script to convert yolo annotations to voc format
2 |
3 | # Sample of the output format (a VOC-style XML annotation, abridged):
4 | #
5 | #   folder:   _image_fashion
6 | #   filename: brooke-cagle-39574.jpg
7 | #   size:
8 | #     width:  1200
9 | #     height: 800
10 | #     depth:  3
11 | #
12 | #   segmented: 0
13 | #
25 | #
26 | import os
27 | import xml.etree.cElementTree as ET
28 | from PIL import Image
29 | import sys
30 | import glob
31 |
32 |
33 | CLASS_MAPPING = {
34 | "0": "name"
35 | # Add your remaining classes here.
36 | }
37 |
38 |
39 | def create_root(file_prefix, width, height):
40 | root = ET.Element("annotations")
41 | ET.SubElement(root, "filename").text = "{}.jpg".format(file_prefix)
42 | ET.SubElement(root, "folder").text = "images"
43 | size = ET.SubElement(root, "size")
44 | ET.SubElement(size, "width").text = str(width)
45 | ET.SubElement(size, "height").text = str(height)
46 | ET.SubElement(size, "depth").text = "3"
47 | return root
48 |
49 |
50 | def create_object_annotation(root, voc_labels):
51 | for voc_label in voc_labels:
52 | obj = ET.SubElement(root, "object")
53 | ET.SubElement(obj, "name").text = voc_label[0]
54 | ET.SubElement(obj, "pose").text = "Unspecified"
55 | ET.SubElement(obj, "truncated").text = str(0)
56 | ET.SubElement(obj, "difficult").text = str(0)
57 | bbox = ET.SubElement(obj, "bndbox")
58 | ET.SubElement(bbox, "xmin").text = str(voc_label[1])
59 | ET.SubElement(bbox, "ymin").text = str(voc_label[2])
60 | ET.SubElement(bbox, "xmax").text = str(voc_label[3])
61 | ET.SubElement(bbox, "ymax").text = str(voc_label[4])
62 | return root
63 |
64 |
65 | def create_file(file_prefix, width, height, voc_labels, des_dir):
66 | root = create_root(file_prefix, width, height)
67 | root = create_object_annotation(root, voc_labels)
68 | tree = ET.ElementTree(root)
69 | tree.write("{}/{}.xml".format(des_dir, file_prefix))
70 |
71 |
72 | def read_file(file_path, des_dir):
73 | file_prefix = os.path.basename(file_path).split(".txt")[0]
74 | image_file_name = "{}.jpg".format(file_prefix)
75 | img = Image.open("{}/{}".format("images", image_file_name))
76 | w, h = img.size
77 | with open(file_path, "r") as file:
78 | lines = file.readlines()
79 | voc_labels = []
80 | for line in lines:
81 | voc = []
82 | line = line.strip()
83 | data = line.split()
84 | # voc.append(CLASS_MAPPING.get(data[0]))
85 | voc.append(data[0])
86 | bbox_width = float(data[3]) * w
87 | bbox_height = float(data[4]) * h
88 | center_x = float(data[1]) * w
89 | center_y = float(data[2]) * h
90 | voc.append(center_x - (bbox_width / 2))
91 | voc.append(center_y - (bbox_height / 2))
92 | voc.append(center_x + (bbox_width / 2))
93 | voc.append(center_y + (bbox_height / 2))
94 | voc_labels.append(voc)
95 | create_file(file_prefix, w, h, voc_labels, des_dir)
96 | print("Processing complete for file: {}".format(file_path))
97 |
98 |
99 | def start(dir_name):
100 | des_d = "output_xmls"
101 | os.makedirs(des_d, exist_ok=True)
102 | txts = glob.glob(os.path.join(dir_name, "*.txt"))
103 | for filename in txts:
104 | read_file(filename, des_d)
105 |
106 |
107 | if __name__ == "__main__":
108 | start(sys.argv[1])
109 |
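A minimal layout sketch for running the script; the labels folder name is arbitrary, but the images/ and output_xmls/ paths are the ones hard-coded in read_file and start above:

# expected layout, relative to the working directory:
#   images/foo.jpg        matching image, opened by read_file()
#   labels/foo.txt        YOLO lines: "class cx cy w h", normalized to [0, 1]
#   output_xmls/foo.xml   written by create_file()
start("labels")           # or: python txt2voc.py labels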
--------------------------------------------------------------------------------
/alfred/deprecated/dl/torch/tools.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | import inspect
25 | from collections import OrderedDict
26 | import numpy as np
27 | import torch
28 |
29 |
30 | def get_pos_to_kw_map(func):
31 | pos_to_kw = {}
32 | fsig = inspect.signature(func)
33 | pos = 0
34 | for name, info in fsig.parameters.items():
35 | if info.kind is info.POSITIONAL_OR_KEYWORD:
36 | pos_to_kw[pos] = name
37 | pos += 1
38 | return pos_to_kw
39 |
40 |
41 | def get_kw_to_default_map(func):
42 | kw_to_default = {}
43 | fsig = inspect.signature(func)
44 | for name, info in fsig.parameters.items():
45 | if info.kind is info.POSITIONAL_OR_KEYWORD:
46 | if info.default is not info.empty:
47 | kw_to_default[name] = info.default
48 | return kw_to_default
49 |
50 |
51 | def change_default_args(**kwargs):
52 | def layer_wrapper(layer_class):
53 | class DefaultArgLayer(layer_class):
54 | def __init__(self, *args, **kw):
55 | pos_to_kw = get_pos_to_kw_map(layer_class.__init__)
56 | kw_to_pos = {kw: pos for pos, kw in pos_to_kw.items()}
57 | for key, val in kwargs.items():
58 | if key not in kw and kw_to_pos[key] > len(args):
59 | kw[key] = val
60 | super().__init__(*args, **kw)
61 |
62 | return DefaultArgLayer
63 |
64 | return layer_wrapper
65 |
66 |
67 | def torch_to_np_dtype(ttype):
68 | type_map = {
69 | torch.float16: np.dtype(np.float16),
70 | torch.float32: np.dtype(np.float32),
71 |         torch.float64: np.dtype(np.float64),
72 | torch.int32: np.dtype(np.int32),
73 | torch.int64: np.dtype(np.int64),
74 | torch.uint8: np.dtype(np.uint8),
75 | }
76 | return type_map[ttype]
77 |
78 |
79 | def check_tensor_equal(t_a, t_b, epsilon=1e-5):
80 | res = torch.isclose(t_a, t_b, epsilon)
81 | res2 = torch.all(res)
82 | res2 = res2.detach().cpu().numpy()
83 | if res2:
84 | return res2, None
85 | else:
86 | return res2, res
87 |
88 |
89 | def torch_load_state_dict_without_module(ckp_file, map_location='cpu', specific_key=None):
90 | """
91 |     load a checkpoint's state_dict, stripping the 'module.' prefix added by DataParallel
92 | """
93 | checkpoint = torch.load(ckp_file, map_location=map_location)
94 | if 'state_dict' in checkpoint.keys():
95 | state_dict = checkpoint["state_dict"]
96 | else:
97 | if specific_key is not None and specific_key in checkpoint.keys():
98 | state_dict = checkpoint[specific_key]
99 | else:
100 | state_dict = checkpoint
101 |
102 | new_state_dict = OrderedDict()
103 | for k, v in state_dict.items():
104 | if "module." in k:
105 | k = k[7:] # remove 'module.' of dataparallel
106 | new_state_dict[k] = v
107 | return new_state_dict
108 |
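A small sketch of change_default_args, reusing this module's torch import: it subclasses a layer so that a chosen keyword gets a new default value (the wrapped class name here is made up):

BiasFreeConv2d = change_default_args(bias=False)(torch.nn.Conv2d)
conv = BiasFreeConv2d(16, 32, kernel_size=3)   # no need to pass bias=False each time
print(conv.bias)                               # None, because bias now defaults to False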
--------------------------------------------------------------------------------
/alfred/modules/data/view_txt.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | """
25 |
26 | view txt labeled detection data: "class xmin ymin xmax ymax" or "class prob xmin ymin xmax ymax" per line, in pixels
27 |
28 |
29 | """
30 | import os
31 | import sys
32 | import cv2
33 | from glob import glob
34 |
35 |
36 |
37 | from alfred.utils.log import logger as logging
38 |
39 |
40 | def vis_det_txt(img_root, label_root):
41 | logging.info("img root: {}, label root: {}".format(img_root, label_root))
42 | # auto detection .jpg or .png images
43 | txt_files = glob(os.path.join(label_root, "*.txt"))
44 | for txt_f in txt_files:
45 | img_f = os.path.join(img_root, os.path.basename(txt_f).split(".")[0] + ".jpg")
46 | if os.path.exists(img_f):
47 | img = cv2.imread(img_f)
48 | if os.path.exists(txt_f):
49 | with open(txt_f) as f:
50 | annos = f.readlines()
51 | for ann in annos:
52 | ann = ann.strip().split(" ")
53 | if len(ann) == 5:
54 | # not include prob
55 | category = ann[0]
56 | xmin = int(float(ann[1]))
57 | ymin = int(float(ann[2]))
58 | xmax = int(float(ann[3]))
59 | ymax = int(float(ann[4]))
60 | cv2.putText(
61 | img,
62 | category,
63 | (xmin, ymin),
64 | cv2.FONT_HERSHEY_COMPLEX,
65 | 0.7,
66 | (255, 255, 255),
67 | )
68 | cv2.rectangle(
69 | img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2, 1
70 | )
71 | elif len(ann) == 6:
72 | # include prob
73 | category = ann[0]
74 | prob = float(ann[1])
75 | xmin = int(float(ann[2]))
76 | ymin = int(float(ann[3]))
77 | xmax = int(float(ann[4]))
78 | ymax = int(float(ann[5]))
79 | cv2.putText(
80 | img,
81 | "{} {}".format(category, prob),
82 | (xmin, ymin),
83 | cv2.FONT_HERSHEY_COMPLEX,
84 | 0.7,
85 | (255, 255, 255),
86 | )
87 | cv2.rectangle(
88 | img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2, 1
89 | )
90 | cv2.imshow("txt check", img)
91 | cv2.waitKey(0)
92 | else:
93 |             logging.warning("image: {} not found.".format(img_f))
94 |
95 |
96 | if __name__ == "__main__":
97 | vis_det_txt(sys.argv[1], sys.argv[2])
98 |
--------------------------------------------------------------------------------
/alfred/vis/image/pose_datasets/onehand10k.py:
--------------------------------------------------------------------------------
1 | dataset_info = dict(
2 | dataset_name="onehand10k",
3 | paper_info=dict(
4 | author="Wang, Yangang and Peng, Cong and Liu, Yebin",
5 | title="Mask-pose cascaded cnn for 2d hand pose estimation "
6 | "from single color image",
7 | container="IEEE Transactions on Circuits and Systems " "for Video Technology",
8 | year="2018",
9 | homepage="https://www.yangangwang.com/papers/WANG-MCC-2018-10.html",
10 | ),
11 | keypoint_info={
12 | 0: dict(name="wrist", id=0, color=[255, 255, 255], type="", swap=""),
13 | 1: dict(name="thumb1", id=1, color=[255, 128, 0], type="", swap=""),
14 | 2: dict(name="thumb2", id=2, color=[255, 128, 0], type="", swap=""),
15 | 3: dict(name="thumb3", id=3, color=[255, 128, 0], type="", swap=""),
16 | 4: dict(name="thumb4", id=4, color=[255, 128, 0], type="", swap=""),
17 | 5: dict(name="forefinger1", id=5, color=[255, 153, 255], type="", swap=""),
18 | 6: dict(name="forefinger2", id=6, color=[255, 153, 255], type="", swap=""),
19 | 7: dict(name="forefinger3", id=7, color=[255, 153, 255], type="", swap=""),
20 | 8: dict(name="forefinger4", id=8, color=[255, 153, 255], type="", swap=""),
21 | 9: dict(name="middle_finger1", id=9, color=[102, 178, 255], type="", swap=""),
22 | 10: dict(name="middle_finger2", id=10, color=[102, 178, 255], type="", swap=""),
23 | 11: dict(name="middle_finger3", id=11, color=[102, 178, 255], type="", swap=""),
24 | 12: dict(name="middle_finger4", id=12, color=[102, 178, 255], type="", swap=""),
25 | 13: dict(name="ring_finger1", id=13, color=[255, 51, 51], type="", swap=""),
26 | 14: dict(name="ring_finger2", id=14, color=[255, 51, 51], type="", swap=""),
27 | 15: dict(name="ring_finger3", id=15, color=[255, 51, 51], type="", swap=""),
28 | 16: dict(name="ring_finger4", id=16, color=[255, 51, 51], type="", swap=""),
29 | 17: dict(name="pinky_finger1", id=17, color=[0, 255, 0], type="", swap=""),
30 | 18: dict(name="pinky_finger2", id=18, color=[0, 255, 0], type="", swap=""),
31 | 19: dict(name="pinky_finger3", id=19, color=[0, 255, 0], type="", swap=""),
32 | 20: dict(name="pinky_finger4", id=20, color=[0, 255, 0], type="", swap=""),
33 | },
34 | skeleton_info={
35 | 0: dict(link=("wrist", "thumb1"), id=0, color=[255, 128, 0]),
36 | 1: dict(link=("thumb1", "thumb2"), id=1, color=[255, 128, 0]),
37 | 2: dict(link=("thumb2", "thumb3"), id=2, color=[255, 128, 0]),
38 | 3: dict(link=("thumb3", "thumb4"), id=3, color=[255, 128, 0]),
39 | 4: dict(link=("wrist", "forefinger1"), id=4, color=[255, 153, 255]),
40 | 5: dict(link=("forefinger1", "forefinger2"), id=5, color=[255, 153, 255]),
41 | 6: dict(link=("forefinger2", "forefinger3"), id=6, color=[255, 153, 255]),
42 | 7: dict(link=("forefinger3", "forefinger4"), id=7, color=[255, 153, 255]),
43 | 8: dict(link=("wrist", "middle_finger1"), id=8, color=[102, 178, 255]),
44 | 9: dict(link=("middle_finger1", "middle_finger2"), id=9, color=[102, 178, 255]),
45 | 10: dict(
46 | link=("middle_finger2", "middle_finger3"), id=10, color=[102, 178, 255]
47 | ),
48 | 11: dict(
49 | link=("middle_finger3", "middle_finger4"), id=11, color=[102, 178, 255]
50 | ),
51 | 12: dict(link=("wrist", "ring_finger1"), id=12, color=[255, 51, 51]),
52 | 13: dict(link=("ring_finger1", "ring_finger2"), id=13, color=[255, 51, 51]),
53 | 14: dict(link=("ring_finger2", "ring_finger3"), id=14, color=[255, 51, 51]),
54 | 15: dict(link=("ring_finger3", "ring_finger4"), id=15, color=[255, 51, 51]),
55 | 16: dict(link=("wrist", "pinky_finger1"), id=16, color=[0, 255, 0]),
56 | 17: dict(link=("pinky_finger1", "pinky_finger2"), id=17, color=[0, 255, 0]),
57 | 18: dict(link=("pinky_finger2", "pinky_finger3"), id=18, color=[0, 255, 0]),
58 | 19: dict(link=("pinky_finger3", "pinky_finger4"), id=19, color=[0, 255, 0]),
59 | },
60 | joint_weights=[1.0] * 21,
61 | sigmas=[],
62 | )
63 |
--------------------------------------------------------------------------------
/alfred/vis/image/pose_datasets/interhand2d.py:
--------------------------------------------------------------------------------
1 | dataset_info = dict(
2 | dataset_name="interhand2d",
3 | paper_info=dict(
4 | author="Moon, Gyeongsik and Yu, Shoou-I and Wen, He and "
5 | "Shiratori, Takaaki and Lee, Kyoung Mu",
6 | title="InterHand2.6M: A dataset and baseline for 3D "
7 | "interacting hand pose estimation from a single RGB image",
8 | container="arXiv",
9 | year="2020",
10 | homepage="https://mks0601.github.io/InterHand2.6M/",
11 | ),
12 | keypoint_info={
13 | 0: dict(name="thumb4", id=0, color=[255, 128, 0], type="", swap=""),
14 | 1: dict(name="thumb3", id=1, color=[255, 128, 0], type="", swap=""),
15 | 2: dict(name="thumb2", id=2, color=[255, 128, 0], type="", swap=""),
16 | 3: dict(name="thumb1", id=3, color=[255, 128, 0], type="", swap=""),
17 | 4: dict(name="forefinger4", id=4, color=[255, 153, 255], type="", swap=""),
18 | 5: dict(name="forefinger3", id=5, color=[255, 153, 255], type="", swap=""),
19 | 6: dict(name="forefinger2", id=6, color=[255, 153, 255], type="", swap=""),
20 | 7: dict(name="forefinger1", id=7, color=[255, 153, 255], type="", swap=""),
21 | 8: dict(name="middle_finger4", id=8, color=[102, 178, 255], type="", swap=""),
22 | 9: dict(name="middle_finger3", id=9, color=[102, 178, 255], type="", swap=""),
23 | 10: dict(name="middle_finger2", id=10, color=[102, 178, 255], type="", swap=""),
24 | 11: dict(name="middle_finger1", id=11, color=[102, 178, 255], type="", swap=""),
25 | 12: dict(name="ring_finger4", id=12, color=[255, 51, 51], type="", swap=""),
26 | 13: dict(name="ring_finger3", id=13, color=[255, 51, 51], type="", swap=""),
27 | 14: dict(name="ring_finger2", id=14, color=[255, 51, 51], type="", swap=""),
28 | 15: dict(name="ring_finger1", id=15, color=[255, 51, 51], type="", swap=""),
29 | 16: dict(name="pinky_finger4", id=16, color=[0, 255, 0], type="", swap=""),
30 | 17: dict(name="pinky_finger3", id=17, color=[0, 255, 0], type="", swap=""),
31 | 18: dict(name="pinky_finger2", id=18, color=[0, 255, 0], type="", swap=""),
32 | 19: dict(name="pinky_finger1", id=19, color=[0, 255, 0], type="", swap=""),
33 | 20: dict(name="wrist", id=20, color=[255, 255, 255], type="", swap=""),
34 | },
35 | skeleton_info={
36 | 0: dict(link=("wrist", "thumb1"), id=0, color=[255, 128, 0]),
37 | 1: dict(link=("thumb1", "thumb2"), id=1, color=[255, 128, 0]),
38 | 2: dict(link=("thumb2", "thumb3"), id=2, color=[255, 128, 0]),
39 | 3: dict(link=("thumb3", "thumb4"), id=3, color=[255, 128, 0]),
40 | 4: dict(link=("wrist", "forefinger1"), id=4, color=[255, 153, 255]),
41 | 5: dict(link=("forefinger1", "forefinger2"), id=5, color=[255, 153, 255]),
42 | 6: dict(link=("forefinger2", "forefinger3"), id=6, color=[255, 153, 255]),
43 | 7: dict(link=("forefinger3", "forefinger4"), id=7, color=[255, 153, 255]),
44 | 8: dict(link=("wrist", "middle_finger1"), id=8, color=[102, 178, 255]),
45 | 9: dict(link=("middle_finger1", "middle_finger2"), id=9, color=[102, 178, 255]),
46 | 10: dict(
47 | link=("middle_finger2", "middle_finger3"), id=10, color=[102, 178, 255]
48 | ),
49 | 11: dict(
50 | link=("middle_finger3", "middle_finger4"), id=11, color=[102, 178, 255]
51 | ),
52 | 12: dict(link=("wrist", "ring_finger1"), id=12, color=[255, 51, 51]),
53 | 13: dict(link=("ring_finger1", "ring_finger2"), id=13, color=[255, 51, 51]),
54 | 14: dict(link=("ring_finger2", "ring_finger3"), id=14, color=[255, 51, 51]),
55 | 15: dict(link=("ring_finger3", "ring_finger4"), id=15, color=[255, 51, 51]),
56 | 16: dict(link=("wrist", "pinky_finger1"), id=16, color=[0, 255, 0]),
57 | 17: dict(link=("pinky_finger1", "pinky_finger2"), id=17, color=[0, 255, 0]),
58 | 18: dict(link=("pinky_finger2", "pinky_finger3"), id=18, color=[0, 255, 0]),
59 | 19: dict(link=("pinky_finger3", "pinky_finger4"), id=19, color=[0, 255, 0]),
60 | },
61 | joint_weights=[1.0] * 21,
62 | sigmas=[],
63 | )
64 |
--------------------------------------------------------------------------------
/examples/draw_3d_pointcloud.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2020 JinTian.
3 | #
4 | # This file is part of alfred
5 | # (see http://jinfagang.github.io).
6 | #
7 | # Licensed to the Apache Software Foundation (ASF) under one
8 | # or more contributor license agreements. See the NOTICE file
9 | # distributed with this work for additional information
10 | # regarding copyright ownership. The ASF licenses this file
11 | # to you under the Apache License, Version 2.0 (the
12 | # "License"); you may not use this file except in compliance
13 | # with the License. You may obtain a copy of the License at
14 | #
15 | # http://www.apache.org/licenses/LICENSE-2.0
16 | #
17 | # Unless required by applicable law or agreed to in writing,
18 | # software distributed under the License is distributed on an
19 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | # KIND, either express or implied. See the License for the
21 | # specific language governing permissions and limitations
22 | # under the License.
23 | #
24 | from alfred.vis.pointcloud.pointcloud_vis import draw_pointclouds_boxes_o3d
25 | import open3d as o3d
26 | import numpy as np
27 | from alfred.fusion.common import draw_3d_box, compute_3d_box_lidar_coords
28 | from alfred.fusion.kitti_fusion import load_pc_from_file
29 | import os
30 |
31 |
32 | v_f = os.path.join(os.path.dirname(os.path.abspath(__file__)), "./data/000011.bin")
33 | pcs = load_pc_from_file(v_f)
34 |
35 |
36 | # try getting 3d boxes coordinates
37 |
38 | res = [
39 | [4.481686, 5.147319, -1.0229858, 1.5728549, 3.646751, 1.5121397, 1.5486346],
40 | [-2.5172017, 5.0262384, -1.0679419, 1.6241353, 4.0445814, 1.4938312, 1.620804],
41 | [1.1783253, -2.9209857, -0.9852259, 1.5852798, 3.7360613, 1.4671413, 1.5811548],
42 | [12.925569, -4.9808474, -0.71562666, 0.5328532, 0.89768076, 1.7436955, 0.7869441],
43 | [-9.657954, -2.9310253, -0.9663244, 1.6315838, 4.0691543, 1.4506648, 4.7061768],
44 | [-7.734651, 4.928315, -1.3513744, 1.7096852, 4.41021, 1.4849466, 1.5580404],
45 | [-21.06287, -6.378005, -0.6494193, 0.58654386, 0.67096156, 1.7274126, 1.5062331],
46 | [-12.977588, 4.7324443, -1.2884868, 1.6366509, 3.993301, 1.4792416, 1.5961027],
47 | [27.237848, 4.973592, -0.63590205, 1.6796488, 4.1773257, 1.8397285, 1.5534456],
48 | [-15.21727, -3.3323386, -1.1841949, 1.5691711, 3.7851675, 1.4302691, 1.4623685],
49 | [-8.560741, -15.309304, -0.40493315, 1.5614295, 3.6039133, 1.4802926, 3.685232],
50 | [-28.535696, 1.8784677, -1.349385, 1.8589652, 4.6122866, 2.0191495, 4.708105],
51 | [22.139666, -19.737762, -0.74519694, 0.52543664, 1.7905389, 1.684143, -0.26117292],
52 | [-4.4033785, -2.856424, -0.95746094, 1.7221596, 4.5044794, 1.6574095, 1.5402203],
53 | [7.085311, -12.124656, -0.7908472, 1.605196, 4.036379, 1.4904786, 3.1525888],
54 | [-17.75546, 4.869718, -1.4353731, 1.625128, 4.0645328, 1.4669982, 1.5843123],
55 | [22.015368, -16.157223, -0.97120696, 0.70649695, 1.8466028, 1.6473441, 3.46424],
56 | [34.445316, -2.0812414, -0.5032885, 0.6895117, 0.8842125, 1.7723539, -1.4539356],
57 | [-32.120346, 7.0260167, -1.6048443, 0.59323585, 0.7810404, 1.7134606, 0.9840808],
58 | [11.191077, -20.68808, -0.3166721, 2.1275487, 6.112693, 2.4575462, 4.6473494],
59 | [-0.18853411, -11.496099, -0.723109, 1.6154484, 3.9286208, 1.5749075, 3.0955489],
60 | [7.4211736, -7.1129866, -1.355744, 1.5750822, 3.9536934, 1.4568869, -0.6677291],
61 | [16.404984, 7.875185, -0.9816911, 0.64251673, 0.63132536, 1.7938845, 1.0830851],
62 | [20.704462, -21.648046, -0.99220616, 1.5985962, 3.830404, 1.521529, 3.0288131],
63 | [-34.060417, -1.6139596, -1.1061747, 0.73393285, 0.8841753, 1.7669718, 4.5250244],
64 | [-9.143257, -8.996165, -0.9218217, 1.5279316, 3.592435, 1.4721779, 0.85066897],
65 | [-31.856539, -2.953291, -1.4160485, 0.67631316, 0.86612713, 1.7683575, 3.113426],
66 | [-29.955063, -4.6513176, -1.2724423, 1.5479406, 3.5412807, 1.463421, 0.11858773],
67 | [10.639572, 11.339079, -0.35397023, 0.6703583, 0.57711476, 1.7787935, 4.486712],
68 | [-11.947865, -21.075172, -0.32996762, 1.5983682, 3.945621, 1.4992962, 1.6880405],
69 | [-17.38843, -6.5131726, -0.07191068, 0.6577756, 0.7161297, 1.8168749, 1.8645211],
70 | [2.0013125, -16.632671, -0.54558295, 0.54916567, 1.8482145, 1.7980447, 5.3003416],
71 | ]
72 |
73 |
74 | if __name__ == "__main__":
75 | draw_pointclouds_boxes_o3d(pcs, res)
76 |
--------------------------------------------------------------------------------