├── .gitattributes
├── .gitignore
├── Pipfile
├── Pipfile.lock
├── README.md
├── arcgis_integration
├── Script.hlk
├── __pycache__
│ ├── class_measure.cpython-36.pyc
│ └── class_measure.cpython-37.pyc
├── class_measure.py
├── command.py
└── exe_measure.py
├── camera
├── data_capture_no_gui.py
└── qt gui
│ ├── cmd_class.py
│ ├── img
│ ├── 1.jpg
│ ├── intel.ico
│ └── intel.png
│ ├── qtcam.exe
│ ├── qtcam.py
│ └── temp.py
├── examples
├── bag
│ ├── 0917_006.bag
│ ├── 0917_007.bag
│ ├── 0917_008.bag
│ ├── 0917_009.bag
│ └── 0917_010.bag
├── foto_log
│ ├── 0917_006.txt
│ ├── 0917_007.txt
│ ├── 0917_008.txt
│ ├── 0917_009.txt
│ └── 0917_010.txt
└── google earth.PNG
├── processor
├── data_process_tkinter.py
├── gui2.py
└── proccessor.py
├── requirements.txt
└── separate functions
├── 1.PNG
├── background_remove.py
├── check_bag_valid.py
├── cvcanvas.py
├── data_process0812.py
├── datacapture_mp_kml.py
├── geotag.py
├── jpgcreator.py
├── live_gps.py
├── measure_new.py
├── opencv_find_key.py
├── phase 1
├── Hello World.py
├── Ruler.py
├── color_to_png.py
├── depth to png.py
├── depth_to_png.py
├── manual_photo.py
├── matchframelist.py
├── measure.py
├── measure_loop.py
├── measure_loop_temp.py
├── measure_loop_temp_0405.py
├── measure_read_per_frame.py
├── measure_search.py
├── measure_stream.py
├── measure_synced.py
├── measure_with_mpl.py
├── read_bag.py
├── record_loop.py
├── save-translate.py
├── to_frame_script.py
└── widgets2.py
├── photo_capture.py
├── single_frameset.py
└── widgets2.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.bag filter=lfs diff=lfs merge=lfs -text
2 | .idea
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | location.csv
3 | foto_location.csv
4 | .idea
--------------------------------------------------------------------------------
/Pipfile:
--------------------------------------------------------------------------------
1 | [[source]]
2 | name = "pypi"
3 | url = "https://pypi.org/simple"
4 | verify_ssl = true
5 |
6 | [dev-packages]
7 |
8 | [packages]
9 | pyrealsense2 = "*"
10 | pyserial = "*"
11 | pyqt5 = "*"
12 | numpy = "*"
13 | opencv-python = "*"
14 | piexif = "*"
15 | fraction = "*"
16 |
17 | [requires]
18 | python_version = "3.7"
19 |
--------------------------------------------------------------------------------
/Pipfile.lock:
--------------------------------------------------------------------------------
1 | {
2 | "_meta": {
3 | "hash": {
4 | "sha256": "bd01a48eee3da1bb553ff512153f4918d0be814b99c439c84d737273ebe1703b"
5 | },
6 | "pipfile-spec": 6,
7 | "requires": {
8 | "python_version": "3.7"
9 | },
10 | "sources": [
11 | {
12 | "name": "pypi",
13 | "url": "https://pypi.org/simple",
14 | "verify_ssl": true
15 | }
16 | ]
17 | },
18 | "default": {
19 | "fraction": {
20 | "hashes": [
21 | "sha256:242dcff2c097e45467533d985125ac6ec2b1590972a22592449fbab8fae82188"
22 | ],
23 | "index": "pypi",
24 | "version": "==1.1.0"
25 | },
26 | "numpy": {
27 | "hashes": [
28 | "sha256:0a7a1dd123aecc9f0076934288ceed7fd9a81ba3919f11a855a7887cbe82a02f",
29 | "sha256:0c0763787133dfeec19904c22c7e358b231c87ba3206b211652f8cbe1241deb6",
30 | "sha256:3d52298d0be333583739f1aec9026f3b09fdfe3ddf7c7028cb16d9d2af1cca7e",
31 | "sha256:43bb4b70585f1c2d153e45323a886839f98af8bfa810f7014b20be714c37c447",
32 | "sha256:475963c5b9e116c38ad7347e154e5651d05a2286d86455671f5b1eebba5feb76",
33 | "sha256:64874913367f18eb3013b16123c9fed113962e75d809fca5b78ebfbb73ed93ba",
34 | "sha256:683828e50c339fc9e68720396f2de14253992c495fdddef77a1e17de55f1decc",
35 | "sha256:6ca4000c4a6f95a78c33c7dadbb9495c10880be9c89316aa536eac359ab820ae",
36 | "sha256:75fd817b7061f6378e4659dd792c84c0b60533e867f83e0d1e52d5d8e53df88c",
37 | "sha256:7d81d784bdbed30137aca242ab307f3e65c8d93f4c7b7d8f322110b2e90177f9",
38 | "sha256:8d0af8d3664f142414fd5b15cabfd3b6cc3ef242a3c7a7493257025be5a6955f",
39 | "sha256:9679831005fb16c6df3dd35d17aa31dc0d4d7573d84f0b44cc481490a65c7725",
40 | "sha256:a8f67ebfae9f575d85fa859b54d3bdecaeece74e3274b0b5c5f804d7ca789fe1",
41 | "sha256:acbf5c52db4adb366c064d0b7c7899e3e778d89db585feadd23b06b587d64761",
42 | "sha256:ada4805ed51f5bcaa3a06d3dd94939351869c095e30a2b54264f5a5004b52170",
43 | "sha256:c7354e8f0eca5c110b7e978034cd86ed98a7a5ffcf69ca97535445a595e07b8e",
44 | "sha256:e2e9d8c87120ba2c591f60e32736b82b67f72c37ba88a4c23c81b5b8fa49c018",
45 | "sha256:e467c57121fe1b78a8f68dd9255fbb3bb3f4f7547c6b9e109f31d14569f490c3",
46 | "sha256:ede47b98de79565fcd7f2decb475e2dcc85ee4097743e551fe26cfc7eb3ff143",
47 | "sha256:f58913e9227400f1395c7b800503ebfdb0772f1c33ff8cb4d6451c06cabdf316",
48 | "sha256:fe39f5fd4103ec4ca3cb8600b19216cd1ff316b4990f4c0b6057ad982c0a34d5"
49 | ],
50 | "index": "pypi",
51 | "version": "==1.17.4"
52 | },
53 | "opencv-python": {
54 | "hashes": [
55 | "sha256:01505b131dc35f60e99a5da98b77156e37f872ae0ff5596e5e68d526bb572d3c",
56 | "sha256:0478a1037505ddde312806c960a5e8958d2cf7a2885e8f2f5dde74c4028e0b04",
57 | "sha256:17810b89f9ef8e8537e75332acf533e619e26ccadbf1b73f24bf338f2d327ddd",
58 | "sha256:19ad2ea9fb32946761b47b9d6eed51876a8329da127f27788263fecd66651ba0",
59 | "sha256:1a250edb739baf3e7c25d99a2ee252aac4f59a97e0bee39237eaa490fd0281d3",
60 | "sha256:3505468970448f66cd776cb9e179570c87988f94b5cf9bcbc4c2d88bd88bbdf1",
61 | "sha256:4e04a91da157885359f487534433340b2d709927559c80acf62c28167e59be02",
62 | "sha256:5a49cffcdec5e37217672579c3343565926d999642844efa9c6a031ed5f32318",
63 | "sha256:604b2ce3d4a86480ced0813da7fba269b4605ad9fea26cd2144d8077928d4b49",
64 | "sha256:61cbb8fa9565a0480c46028599431ad8f19181a7fac8070a700515fd54cd7377",
65 | "sha256:62d7c6e511c9454f099616315c695d02a584048e1affe034b39160db7a2ae34d",
66 | "sha256:6555272dd9efd412d17cdc1a4f4c2da5753c099d95d9ff01aca54bb9782fb5cf",
67 | "sha256:67d994c6b2b14cb9239e85dc7dfa6c08ef7cf6eb4def80c0af6141dfacc8cbb9",
68 | "sha256:68c9cbe538666c4667523821cc56caee49389bea06bae4c0fc2cd68bd264226a",
69 | "sha256:822ad8f628a9498f569c57d30865f5ef9ee17824cee0a1d456211f742028c135",
70 | "sha256:82d972429eb4fee22c1dc4204af2a2e981f010e5e4f66daea2a6c68381b79184",
71 | "sha256:9128924f5b58269ee221b8cf2d736f31bd3bb0391b92ee8504caadd68c8176a2",
72 | "sha256:9172cf8270572c494d8b2ae12ef87c0f6eed9d132927e614099f76843b0c91d7",
73 | "sha256:952bce4d30a8287b17721ddaad7f115dab268efee8576249ddfede80ec2ce404",
74 | "sha256:a8147718e70b1f170a3d26518e992160137365a4db0ed82a9efd3040f9f660d4",
75 | "sha256:bfdb636a3796ff223460ea0fcfda906b3b54f4bef22ae433a5b67e66fab00b25",
76 | "sha256:c9c3f27867153634e1083390920067008ebaaab78aeb09c4e0274e69746cb2c8",
77 | "sha256:d69be21973d450a4662ae6bd1b3df6b1af030e448d7276380b0d1adf7c8c2ae6",
78 | "sha256:db1479636812a6579a3753b72a6fefaa73190f32bf7b19e483f8bc750cebe1a5",
79 | "sha256:db8313d755962a7dd61e5c22a651e0743208adfdb255c6ec8904ce9cb02940c6",
80 | "sha256:e4625a6b032e7797958aeb630d6e3e91e3896d285020aae612e6d7b342d6dfea",
81 | "sha256:e8397a26966a1290836a52c34b362aabc65a422b9ffabcbbdec1862f023ccab8"
82 | ],
83 | "index": "pypi",
84 | "version": "==4.1.1.26"
85 | },
86 | "piexif": {
87 | "hashes": [
88 | "sha256:3bc435d171720150b81b15d27e05e54b8abbde7b4242cddd81ef160d283108b6",
89 | "sha256:83cb35c606bf3a1ea1a8f0a25cb42cf17e24353fd82e87ae3884e74a302a5f1b"
90 | ],
91 | "index": "pypi",
92 | "version": "==1.1.3"
93 | },
94 | "pyqt5": {
95 | "hashes": [
96 | "sha256:14737bb4673868d15fa91dad79fe293d7a93d76c56d01b3757b350b8dcb32b2d",
97 | "sha256:1936c321301f678d4e6703d52860e1955e5c4964e6fd00a1f86725ce5c29083c",
98 | "sha256:3f79de6e9f29e858516cc36ffc2b992e262af841f3799246aec282b76a3eccdf",
99 | "sha256:509daab1c5aca22e3cf9508128abf38e6e5ae311d7426b21f4189ffd66b196e9"
100 | ],
101 | "index": "pypi",
102 | "version": "==5.13.2"
103 | },
104 | "pyqt5-sip": {
105 | "hashes": [
106 | "sha256:02d94786bada670ab17a2b62ce95b3cf8e3b40c99d36007593a6334d551840bb",
107 | "sha256:06bc66b50556fb949f14875a4c224423dbf03f972497ccb883fb19b7b7c3b346",
108 | "sha256:091fbbe10a7aebadc0e8897a9449cda08d3c3f663460d812eca3001ca1ed3526",
109 | "sha256:0a067ade558befe4d46335b13d8b602b5044363bfd601419b556d4ec659bca18",
110 | "sha256:1910c1cb5a388d4e59ebb2895d7015f360f3f6eeb1700e7e33e866c53137eb9e",
111 | "sha256:1c7ad791ec86247f35243bbbdd29cd59989afbe0ab678e0a41211f4407f21dd8",
112 | "sha256:3c330ff1f70b3eaa6f63dce9274df996dffea82ad9726aa8e3d6cbe38e986b2f",
113 | "sha256:482a910fa73ee0e36c258d7646ef38f8061774bbc1765a7da68c65056b573341",
114 | "sha256:7695dfafb4f5549ce1290ae643d6508dfc2646a9003c989218be3ce42a1aa422",
115 | "sha256:8274ed50f4ffbe91d0f4cc5454394631edfecd75dc327aa01be8bc5818a57e88",
116 | "sha256:9047d887d97663790d811ac4e0d2e895f1bf2ecac4041691487de40c30239480",
117 | "sha256:9f6ab1417ecfa6c1ce6ce941e0cebc03e3ec9cd9925058043229a5f003ae5e40",
118 | "sha256:b43ba2f18999d41c3df72f590348152e14cd4f6dcea2058c734d688dfb1ec61f",
119 | "sha256:c3ab9ea1bc3f4ce8c57ebc66fb25cd044ef92ed1ca2afa3729854ecc59658905",
120 | "sha256:da69ba17f6ece9a85617743cb19de689f2d63025bf8001e2facee2ec9bcff18f",
121 | "sha256:ef3c7a0bf78674b0dda86ff5809d8495019903a096c128e1f160984b37848f73",
122 | "sha256:fabff832046643cdb93920ddaa8f77344df90768930fbe6bb33d211c4dcd0b5e"
123 | ],
124 | "version": "==12.7.0"
125 | },
126 | "pyrealsense2": {
127 | "hashes": [
128 | "sha256:0417e2cda00a29cc95e4da0ee2b42cafcc06f61b5ce12563fb0a533e522c1cd9",
129 | "sha256:08fd9806ff48bf923c05c4a9014652f27c3db318477334ce115340a511c5d212",
130 | "sha256:135e7df4bf22421e90566288fb5bfa52a160a680c24dabdc0f7f51bbbfbd7f42",
131 | "sha256:4c8a975c711f423750ab8cf30cecba9df17757616118e2c8b7423426f03f2903",
132 | "sha256:5799894abad6210541435d186001c6ca2690cf3fe0e5c1f9803079b81eca3264",
133 | "sha256:65fd76d42190c4820e18933503745a7b843852e096d4b1fc3f557e7e01549fa6",
134 | "sha256:7109b53c247b392c4870e6545b07fc5a629839883da9536a56fbe401c3563cd6",
135 | "sha256:712cdbb951e1b3c45a99502ecd0bbc7f979af55f39c93d0d514deb6f7d06f479",
136 | "sha256:7bfa5c212d28607da0a03d3a71ec492795105da942184ee6b4d865c6f09e1de2",
137 | "sha256:8323bcb2338f62824f78f710f7a810ec315bc8f4343c39f9a94e46f4d0ef4b07"
138 | ],
139 | "index": "pypi",
140 | "version": "==2.30.0.1184"
141 | },
142 | "pyserial": {
143 | "hashes": [
144 | "sha256:6e2d401fdee0eab996cf734e67773a0143b932772ca8b42451440cfed942c627",
145 | "sha256:e0770fadba80c31013896c7e6ef703f72e7834965954a78e71a3049488d4d7d8"
146 | ],
147 | "index": "pypi",
148 | "version": "==3.4"
149 | }
150 | },
151 | "develop": {}
152 | }
153 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PyRealsense for d435
2 |
3 | ready-to-use exe version can be seen at https://github.com/soarwing52/Realsense_exe
4 |
5 | in this project will have record bag and measure picture from bag with the RealsenseD435 camera. The scripts are separated into two parts, collector and processor.
6 |
7 | the collector is to let the driver collect data effortlessly, with one remote controller.
8 |
9 | the processor is creating data from the bag files and the txt recorded, match frames and location, using arcpy API to create shapefile for users.
10 |
11 | The further usage is using matplotlib within ArcGIS 10, let users read ROSbag files with hyperlink script function
12 |
13 | ## Getting Started
14 |
15 | the script is mainly based on Realsense, using ROSbag and txt files to record gps location
16 |
17 | *the save_single_frame function can't get depth data, show always 0,0,0, therefore I can only use rs.recorder
18 |
19 |
20 | ### Prerequisites
21 |
22 | Hardware: GPS receiver (BU353S4 in my case)
23 | 
24 |
25 | RS camera
26 | 
27 |
28 |
29 |
30 | ### Data Preparation
31 |
32 | data collector: will generate folders: bag, log, and csvs :live location. foto_location
33 |
34 | data processor: from the collected bag and log file, create jpg and matched.txt
35 |
36 |
37 | ### Installing
38 |
39 | A step by step series of examples that tell you how to get a development env running
40 |
41 | will generate requirements.txt in the future
42 | Python packages
43 | ```
44 | pip install --upgrade pip
45 | pip install --user pyrealsense2
46 | pip install --user opencv-python (especially in user, else it will overwrite arcgis python package)
47 | argparse, py-getch, pyserial, fractions,
48 | ```
49 |
50 | ### Data Capture
51 |
52 | the data capture script will automatically generate two folders: bag and foto_log
53 |
54 | it will put the recorded bag in bag format,
55 |
56 | information of index, color frame number, depth frame number, longitude, latitude, day, month, year, and time will be saved in txt format in foto_log
57 |
58 | QtCam:
59 |
60 | 
61 |
62 |
63 | The new version:
64 | now it will write two csv files, and is used to open in QGIS, set open layer with csv, and the rendering frequency 1-5 seconds
65 | depends on the performance of your hardware.
66 | The reason is QGIS can render the huge amount of point data better than Google Earth
67 |
68 | in QGIS:
69 | Add Delimited Text Layer and set render frequency to 1 second for live.csv, foto_location.csv
70 |
71 | ------------------------------------------------------------------------------------------------------------------
72 | kml will generate live location as live, and the location with recorded places in Foto.kml
73 | use Google Earth: Temporary Places/Add/Networklink link to folder/kml/ live for location Foto.kml for foto point
74 |
75 | 
76 |
77 | ### Data process
78 |
79 | to prepare the data, use the data_prepare.py
80 |
81 | click the three buttons in the order left to right
82 |
83 | 1.generate jpg
84 | using openCV to generate JPEG files, one sharpening filter is added.
85 | ```
86 | kernel = np.array([[-1, -1, -1],
87 | [-1, 9, -1],
88 | [-1, -1, -1]])
89 | ```
90 | *due to disk writing speed, sometimes the realsense api will miss some frames, this script loops through the bag files for two times. can be run multiple times if needed
91 |
92 | *this script will skip existing files
93 |
94 | 2.generate shp
95 | because of latency, the frames recorded and the frame number in foto_log can jump in a range around +-2, for example: in log is frame no.100 but it could happen that the recorded is 99-102, so one extra step to match the real frame number to the log file is needed.
96 |
97 | after the matcher.txt is generated, this is the complete list of points taken, use MakeXYEventLayer, and FeatureClassToShapefile_conversion will save a shapefile.
98 |
99 | *an earlier version matched the Color and Depth frames within a 25 ms timestamp window to make sure the frames were aligned; the current version removed this step, since the latency is acceptable
100 |
101 | 3.geotag
102 |
103 | according to the matcher.txt, the photos' EXIF will be written with the Longitude and Latitude
104 |
105 | ### in ArcGIS
106 |
107 | in Arcgis use hyperlink and show script, select python
108 | put in
109 |
110 | ```
111 | import subprocess
112 | def OpenLink ( [jpg_path] ):
113 | bag = [jpg_path]
114 | comnd = 'measure.exe -p {}'.format(bag)
115 | subprocess.call(comnd)
116 | return
117 | ```
118 |
119 |
120 | The method I suggest here is to use the hlk file, by open hlk from the arcgis integration folder, the working directory will be automatically changed to this folder.
121 | Then the video, photo function can be open automatically.
122 |
123 | End with an example of getting some data out of the system or using it for a little demo
124 |
125 | in here we use subprocess because the current realsense library will freeze after set_real_time(False)
126 | and ArcMap doesn't support multithreading or multiprocessing
127 | therefore we can't use the simple import but have to call out command line and then run python
128 |
129 | The running image will be like this:
130 |
131 | use spacebar to switch between modes, video mode can browse through the frames faster
132 | left click and hold to measure, right click to erase all
133 | 
134 | 
135 |
136 | ## Authors
137 |
138 | * **Che-Yi Hung** - *Initial work* - [soarwing52](https://github.com/soarwing52)
139 |
140 |
--------------------------------------------------------------------------------
/arcgis_integration/Script.hlk:
--------------------------------------------------------------------------------
1 | import subprocess
2 | def OpenLink ( [Path] ):
3 |     bag = [Path]
4 |     comnd = r'Z:\Vorlagen\RealSense\ArcGIS\measure.exe -p {}'.format(bag)
5 |     subprocess.call(comnd)
6 |     return
--------------------------------------------------------------------------------
/arcgis_integration/__pycache__/class_measure.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soarwing52/RealsensePython/d4f5cf5c0be9ec7d84e9d759886144c8127d445d/arcgis_integration/__pycache__/class_measure.cpython-36.pyc
--------------------------------------------------------------------------------
/arcgis_integration/__pycache__/class_measure.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soarwing52/RealsensePython/d4f5cf5c0be9ec7d84e9d759886144c8127d445d/arcgis_integration/__pycache__/class_measure.cpython-37.pyc
--------------------------------------------------------------------------------
/arcgis_integration/class_measure.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | import numpy as np
3 | import cv2
4 | import os
5 | import copy, math
6 | import Tkinter
7 |
8 |
9 | class Arc_Real:  # Browse/measure frames of a recorded RealSense .bag, launched from an ArcGIS hyperlink jpg path (Python 2: Tkinter import, print statements)
10 |     def __init__(self, jpg_path):  # jpg_path: path to '<weg_id>-<color frame no>.jpg' inside <project>/jpg/; opens the matching bag and runs the UI loop until quit
11 |         # Basic setting
12 |         self.width, self.height = self.screen_size()  # display size = 80% of the screen resolution
13 |         self.x_ratio, self.y_ratio = self.screen_ratio()  # display-to-source scale factors
14 |
15 |         # Data from txt log file
16 |         BagFilePath = os.path.abspath(jpg_path)
17 |         file_dir = os.path.dirname(BagFilePath)
18 |         Pro_Dir = os.path.dirname(file_dir)  # project root: two directories above the jpg
19 |         self.weg_id, target_color = os.path.splitext(os.path.basename(BagFilePath))[0].split('-')  # jpg filename encodes '<weg_id>-<color frame number>'
20 |
21 |         with open('{}/shp/matcher.txt'.format(Pro_Dir), 'r') as txtfile:
22 |             self.title = [elt.strip() for elt in txtfile.readline().split(',')]  # header row -> column names
23 |             self.frame_list = [[elt.strip() for elt in line.split(',')] for line in txtfile if line.split(',')[0] == self.weg_id]  # keep only rows belonging to this weg_id
24 |
25 |         self.frame_dict, self.i = self.get_attribute(color=target_color, weg_id=self.weg_id)  # NOTE(review): get_attribute returns None when nothing matches -> this unpack would raise TypeError
26 |         file_name = '{}\\bag\\{}.bag'.format(Pro_Dir, self.weg_id)
27 |         # Start Pipeline
28 |         self.pipeline = rs.pipeline()
29 |         config = rs.config()
30 |         config.enable_device_from_file(file_name)
31 |         config.enable_all_streams()
32 |         profile = self.pipeline.start(config)
33 |         device = profile.get_device()
34 |         playback = device.as_playback()
35 |         playback.set_real_time(False)  # non-realtime playback so frames are delivered without being dropped while seeking
36 |
37 |         self.frame_getter('Color')  # Get Color Frame with the matching frame number from self.frame_dict
38 |         mode = 'Video Mode'
39 |         direction = 1  # +1 / -1: browsing direction, reused when re-seeking in Measure Mode
40 |         while True:
41 |             img = self.frame_to_image()
42 |             self.img_work(mode=mode, img=img)
43 |             cv2.namedWindow("Color Stream", cv2.WINDOW_FULLSCREEN)
44 |             cv2.imshow("Color Stream", img)
45 |
46 |             if mode == 'Measure Mode':
47 |                 self.img_origin = self.img_work(mode='Measure Mode', img=img)  # pristine frame kept for right-click erase
48 |                 self.img_copy = copy.copy(self.img_origin)
49 |                 cv2.setMouseCallback("Color Stream", self.draw)
50 |                 cv2.imshow("Color Stream", self.img_copy)
51 |
52 |             key = cv2.waitKeyEx(0)  # blocking; waitKeyEx so the extended arrow-key codes come through
53 |
54 |             # if pressed escape exit program
55 |             if key == 27 or key == 113 or cv2.getWindowProperty('Color Stream', cv2.WND_PROP_VISIBLE) < 1:  # Esc, 'q', or window closed
56 |                 break
57 |
58 |             elif key == 32:  # press space
59 |                 if mode == 'Measure Mode':
60 |                     mode = 'Video Mode'
61 |                 else:
62 |                     self.frame_dict, self.i = self.get_attribute(color=self.color_frame_num, weg_id=self.weg_id)  # NOTE(review): same None-unpack risk as above when the current frame has no matcher row
63 |                     self.img_work(mode='Searching', img=img)
64 |                     cv2.imshow("Color Stream", img)
65 |                     cv2.waitKey(1)  # force a repaint of the 'Searching' banner before the blocking seek
66 |                     item = self.get_attribute(color=self.color_frame_num, weg_id=self.weg_id)
67 |                     if item is not None:
68 |                         print item
69 |                     mode = 'Measure Mode'
70 |
71 |             elif key == 2555904:  # press right
72 |                 self.i += 1
73 |                 direction = 1
74 |             elif key == 2424832:  # press left
75 |                 self.i -= 1
76 |                 direction = -1
77 |
78 |             if mode == 'Measure Mode':
79 |                 self.img_work(mode='Searching', img=img)
80 |                 cv2.imshow("Color Stream", img)
81 |                 cv2.waitKey(1)
82 |                 while True:  # seek until both a Color and a Depth frame near the logged numbers are found
83 |                     Color, Depth = self.frame_getter('Color'), self.frame_getter('Depth')
84 |                     if Color and Depth:
85 |                         break
86 |                     else:
87 |                         self.i += direction  # not found for this row: try the neighbouring matcher row in the browsing direction
88 |
89 |             if mode != 'Measure Mode':
90 |                 frames = self.pipeline.wait_for_frames()  # Video Mode: just advance playback by one frameset
91 |                 self.color_frame = frames.get_color_frame()
92 |                 self.color_frame_num = self.color_frame.get_frame_number()
93 |
94 |         print('finish')
95 |         cv2.destroyAllWindows()
96 |         os._exit(0)  # hard process exit; NOTE(review): makes the following pipeline.stop() unreachable
97 |         self.pipeline.stop()
98 |
99 |     def screen_size(self):  # (width, height) = 80% of the current screen resolution
100 |         root = Tkinter.Tk()  # throwaway Tk root, used only to query the screen size
101 |         width = root.winfo_screenwidth()
102 |         height = root.winfo_screenheight()
103 |         return int(width * 0.8), int(height * 0.8)
104 |
105 |     def screen_ratio(self):  # scale factors from the source image to the display size
106 |         img_size = (1920.0, 1080.0)  # assumes the recorded color stream is 1920x1080 -- TODO confirm against the bag profile
107 |         screen = self.screen_size()
108 |         width_ratio, height_ratio = screen[0]/img_size[0],screen[1]/img_size[1]
109 |         return width_ratio, height_ratio
110 |
111 |     def get_attribute(self, color, weg_id):  # find the matcher row for weg_id whose color frame no. is within +-4 of 'color'; returns (row-as-dict, index) or implicitly None
112 |         for obj in self.frame_list:
113 |             if obj[0] == weg_id and abs(int(obj[2]) - int(color)) < 5:  # column 2 is presumably the color frame number; verify against matcher.txt header
114 |                 i = self.frame_list.index(obj)
115 |                 obj = dict(zip(self.title, obj))
116 |                 return obj, i
117 |
118 |     def index_to_obj(self):  # refresh self.frame_dict from self.frame_list[self.i], wrapping the index past the end
119 |         if self.i >= len(self.frame_list):
120 |             self.i = self.i % len(self.frame_list) +1
121 |         content = self.frame_list[self.i]
122 |         self.frame_dict = dict(zip(self.title, content))
123 |
124 |     def draw(self, event, x, y, flags, params):  # OpenCV mouse callback for Measure Mode
125 |         img = copy.copy(self.img_copy)
126 |         if event == 1:  # cv2.EVENT_LBUTTONDOWN: remember the start point
127 |             self.ix = x
128 |             self.iy = y
129 |             cv2.imshow("Color Stream", self.img_copy)
130 |         elif event == 4:  # cv2.EVENT_LBUTTONUP: commit the measurement onto the kept copy
131 |             img = self.img_copy
132 |             self.img_work(img=img, mode='calc', x=x, y=y)
133 |             cv2.imshow("Color Stream", img)
134 |         elif event == 2:  # cv2.EVENT_RBUTTONDOWN: erase all, restore the pristine frame
135 |             self.img_copy = copy.copy(self.img_origin)
136 |             cv2.imshow("Color Stream", self.img_copy)
137 |         elif flags == 1:  # left button held: live rubber-band preview on a scratch copy
138 |             self.img_work(img=img, mode='calc', x=x, y=y)
139 |             cv2.imshow("Color Stream", img)
140 |
141 |     def img_work(self, mode, img, x=0, y=0):  # overlay frame number, mode banner and (in 'calc') the measured line/distance onto img; returns img
142 |         if 'Measure' in mode:  # show black at NAN
143 |             depth_image = np.asanyarray(self.depth_frame.get_data())
144 |             grey_color = 0
145 |             depth_image_3d = np.dstack(
146 |                 (depth_image, depth_image, depth_image))  # depth image is 1 channel, color is 3 channels
147 |             depth_image_3d= cv2.resize(depth_image_3d, self.screen_size())
148 |             img = np.where((depth_image_3d <= 0), grey_color, img)  # black out pixels with no depth data
149 |
150 |         font = cv2.FONT_ITALIC
151 |         fontScale = 1
152 |         fontColor = (0, 0, 0)
153 |         lineType = 2
154 |
155 |         # Add frame number
156 |         mid_line_frame_num = int(self.width/4)
157 |         rec1, rec2 = (mid_line_frame_num - 50, 20), (mid_line_frame_num + 50, 60)
158 |         text = str(self.color_frame_num)
159 |         bottomLeftCornerOfText = (mid_line_frame_num - 45, 50)
160 |         cv2.rectangle(img, rec1, rec2, (255, 255, 255), -1)  # white label background
161 |         cv2.putText(img, text, bottomLeftCornerOfText, font, fontScale, fontColor, lineType)
162 |
163 |         if mode == 'calc':  # draw the measurement line and prepare its distance label
164 |             pt1, pt2 = (self.ix, self.iy), (x, y)
165 |             ans = self.calculate_distance(x, y)
166 |             text = '{0:.3}'.format(ans)  # 3 significant digits; also passes the 'NaN' string through unchanged
167 |             bottomLeftCornerOfText = (self.ix + 10, (self.iy - 10))
168 |             rec1, rec2 = (self.ix + 10, (self.iy - 5)), (self.ix + 80, self.iy - 35)
169 |
170 |             cv2.line(img, pt1=pt1, pt2=pt2, color=(0, 0, 230), thickness=3)
171 |
172 |         else:  # any other mode: centered banner showing the mode name
173 |             mid_line_screen = int(self.width/2)
174 |             rec1, rec2 = (mid_line_screen-100, 20), (mid_line_screen + 130, 60)
175 |             text = mode
176 |             bottomLeftCornerOfText = (mid_line_screen - 95, 50)
177 |
178 |         cv2.rectangle(img, rec1, rec2, (255, 255, 255), -1)
179 |         cv2.putText(img, text, bottomLeftCornerOfText, font, fontScale, fontColor, lineType)
180 |         return img
181 |
182 |     def calculate_distance(self, x, y):  # 3-D distance between the two clicked points via depth deprojection; returns the string 'NaN' when depth is missing
183 |         color_intrin = self.color_intrin
184 |         x_ratio, y_ratio = self.x_ratio, self.y_ratio
185 |         ix, iy = int(self.ix/x_ratio), int(self.iy/y_ratio)  # map display pixels back to source-image pixels
186 |         x, y = int(x/x_ratio), int(y/y_ratio)
187 |         udist = self.depth_frame.get_distance(ix, iy)
188 |         vdist = self.depth_frame.get_distance(x, y)
189 |         if udist ==0.00 or vdist ==0.00:  # zero depth means no data at that pixel
190 |             dist = 'NaN'
191 |         else:
192 |             point1 = rs.rs2_deproject_pixel_to_point(color_intrin, [ix, iy], udist)
193 |             point2 = rs.rs2_deproject_pixel_to_point(color_intrin, [x, y], vdist)
194 |             dist = math.sqrt(
195 |                 math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2) + math.pow(
196 |                     point1[2] - point2[2], 2))  # Euclidean distance between the deprojected 3-D points
197 |         return dist
198 |
199 |     def frame_getter(self, mode):  # advance playback until the 'Color'/'Depth' frame number from the current matcher row is reached; returns the frame or None
200 |         align_to = rs.stream.color
201 |         align = rs.align(align_to)  # align depth to the color stream so pixel coordinates correspond
202 |         count = 0
203 |         while True:
204 |             self.index_to_obj()
205 |             num = self.frame_dict[mode]  # target frame number (string) from the matcher row
206 |             frames = self.pipeline.wait_for_frames()
207 |             aligned_frames = align.process(frames)
208 |
209 |             if mode == 'Color':
210 |                 frame = aligned_frames.get_color_frame()
211 |                 self.color_frame_num = frame.get_frame_number()
212 |                 self.color_intrin = frame.profile.as_video_stream_profile().intrinsics  # cached for deprojection in calculate_distance
213 |                 self.color_frame = frame
214 |             else:
215 |                 frame = aligned_frames.get_depth_frame()
216 |                 self.depth_frame_num = frame.get_frame_number()
217 |                 self.depth_frame = frame
218 |
219 |             frame_num = frame.get_frame_number()
220 |
221 |             count = self.count_search(count, frame_num, int(num))
222 |             print 'Suchen {}: {}, Jetzt: {}'.format(mode, num, frame_num)
223 |
224 |             if abs(int(frame_num) - int(num)) < 5:  # within +-4 counts as the matching frame (recording latency)
225 |                 print('match {} {}'.format(mode, frame_num))
226 |                 return frame
227 |             elif count > 10:  # give up after ~10 near misses
228 |                 print(num + ' nicht gefunden, suchen naechste frame')
229 |                 return None
230 |
231 |
232 |     def frame_to_image(self):  # current color frame -> resized RGB ndarray for display
233 |         color_image = np.asanyarray(self.color_frame.get_data())
234 |         color_cvt = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
235 |         color_final = cv2.resize(color_cvt, (self.width, self.height))
236 |         return color_final
237 |
238 |     def count_search(self, count, now, target):  # bump the give-up counter, but only while playback is within 100 frames of the target
239 |         if abs(now - target) < 100:
240 |             count += 1
241 |         print 'search count:{}'.format(count)
242 |         return count
243 |
244 |
245 | def main():  # manual test entry point: hard-coded sample jpg path -- adjust to a local dataset before running
246 |     Arc_Real(r'X:/WER_Werl/Realsense/jpg/0917_012-1916.jpg')
247 |
248 | if __name__ == '__main__':
249 |     main()
250 |
--------------------------------------------------------------------------------
/arcgis_integration/command.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import class_measure
3 | import os
4 |
5 | ap = argparse.ArgumentParser()  # CLI entry point: the ArcGIS hyperlink calls this script with -p <jpg path>
6 | ap.add_argument("-p","--path", required=False)  # NOTE(review): required=False means args['path'] may be None, and the 'in' test below would then raise TypeError
7 |
8 | args = vars(ap.parse_args())
9 |
10 | jpg_path = args['path']
11 |
12 | if 'Kamera' in jpg_path:  # plain camera photos (path contains 'Kamera'): just open with the OS default viewer
13 |     os.startfile(jpg_path)  # Windows-only API
14 | else:
15 |     class_measure.Arc_Real(jpg_path)  # bag-derived jpg: launch the measuring viewer
16 |
--------------------------------------------------------------------------------
/arcgis_integration/exe_measure.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | import numpy as np
3 | import cv2
4 | import os
5 | import copy, math
6 | import tkinter as Tkinter
7 | import argparse
8 |
9 |
10 | class Arc_Real:
11 | def __init__(self, jpg_path):
12 | # Basic setting
13 | self.width, self.height = self.screen_size()
14 | self.x_ratio, self.y_ratio = self.screen_ratio()
15 |
16 | # Data from txt log file
17 | BagFilePath = os.path.abspath(jpg_path)
18 | file_dir = os.path.dirname(BagFilePath)
19 | Pro_Dir = os.path.dirname(file_dir)
20 | self.weg_id, target_color = os.path.splitext(os.path.basename(BagFilePath))[0].split('-')
21 |
22 | with open('{}/shp/matcher.txt'.format(Pro_Dir), 'r') as txtfile:
23 | self.title = [elt.strip() for elt in txtfile.readline().split(',')]
24 | self.frame_list = [[elt.strip() for elt in line.split(',')] for line in txtfile if line.split(',')[0] == self.weg_id]
25 |
26 | self.frame_dict, self.i = self.get_attribute(color=target_color, weg_id=self.weg_id)
27 | file_name = '{}\\bag\\{}.bag'.format(Pro_Dir, self.weg_id)
28 | # Start Pipeline
29 | self.pipeline = rs.pipeline()
30 | config = rs.config()
31 | config.enable_device_from_file(file_name)
32 | config.enable_all_streams()
33 | profile = self.pipeline.start(config)
34 | device = profile.get_device()
35 | playback = device.as_playback()
36 | playback.set_real_time(False)
37 |
38 | self.frame_getter('Color') # Get Color Frame with the matching frame number from self.frame_dict
39 | mode = 'Video Mode'
40 | direction = 1
41 | while True:
42 | img = self.frame_to_image()
43 | self.img_work(mode=mode, img=img)
44 | cv2.namedWindow("Color Stream", cv2.WINDOW_FULLSCREEN)
45 | cv2.imshow("Color Stream", img)
46 |
47 | if mode == 'Measure Mode':
48 | self.img_origin = self.img_work(mode='Measure Mode', img=img)
49 | self.img_copy = copy.copy(self.img_origin)
50 | cv2.setMouseCallback("Color Stream", self.draw)
51 | cv2.imshow("Color Stream", self.img_copy)
52 |
53 | key = cv2.waitKeyEx(0)
54 |
55 | # if pressed escape exit program
56 | if key == 27 or key == 113 or cv2.getWindowProperty('Color Stream', cv2.WND_PROP_VISIBLE) < 1:
57 | break
58 |
59 | elif key == 32: # press space
60 | if mode == 'Measure Mode':
61 | mode = 'Video Mode'
62 | else:
63 | self.frame_dict, self.i = self.get_attribute(color=self.color_frame_num, weg_id=self.weg_id)
64 | self.img_work(mode='Searching', img=img)
65 | cv2.imshow("Color Stream", img)
66 | cv2.waitKey(1)
67 | item = self.get_attribute(color=self.color_frame_num, weg_id=self.weg_id)
68 | if item is not None:
69 | print(item)
70 | mode = 'Measure Mode'
71 |
72 | elif key == 2555904: # press right
73 | self.i += 1
74 | direction = 1
75 | elif key == 2424832: # press left
76 | self.i -= 1
77 | direction = -1
78 |
79 | if mode == 'Measure Mode':
80 | self.img_work(mode='Searching', img=img)
81 | cv2.imshow("Color Stream", img)
82 | cv2.waitKey(1)
83 | while True:
84 | Color, Depth = self.frame_getter('Color'), self.frame_getter('Depth')
85 | if Color and Depth:
86 | break
87 | else:
88 | self.i += direction
89 |
90 | if mode != 'Measure Mode':
91 | frames = self.pipeline.wait_for_frames()
92 | self.color_frame = frames.get_color_frame()
93 | self.color_frame_num = self.color_frame.get_frame_number()
94 |
95 | print('finish')
96 | cv2.destroyAllWindows()
97 | os._exit(0)
98 | self.pipeline.stop()
99 |
100 | def screen_size(self):
101 | root = Tkinter.Tk()
102 | width = root.winfo_screenwidth()
103 | height = root.winfo_screenheight()
104 | return int(width * 0.8), int(height * 0.8)
105 |
106 | def screen_ratio(self):
107 | img_size = (1920.0, 1080.0)
108 | screen = self.screen_size()
109 | width_ratio, height_ratio = screen[0]/img_size[0],screen[1]/img_size[1]
110 | return width_ratio, height_ratio
111 |
112 | def get_attribute(self, color, weg_id):
113 | for obj in self.frame_list:
114 | if obj[0] == weg_id and abs(int(obj[2]) - int(color)) < 5:
115 | i = self.frame_list.index(obj)
116 | obj = dict(zip(self.title, obj))
117 | return obj, i
118 |
119 | def index_to_obj(self):
120 | if self.i >= len(self.frame_list):
121 | self.i = self.i % len(self.frame_list) +1
122 | content = self.frame_list[self.i]
123 | self.frame_dict = dict(zip(self.title, content))
124 |
125 | def draw(self, event, x, y, flags, params):
126 | img = copy.copy(self.img_copy)
127 | if event == 1:
128 | self.ix = x
129 | self.iy = y
130 | cv2.imshow("Color Stream", self.img_copy)
131 | elif event == 4:
132 | img = self.img_copy
133 | self.img_work(img=img, mode='calc', x=x, y=y)
134 | cv2.imshow("Color Stream", img)
135 | elif event == 2:
136 | self.img_copy = copy.copy(self.img_origin)
137 | cv2.imshow("Color Stream", self.img_copy)
138 | elif flags == 1:
139 | self.img_work(img=img, mode='calc', x=x, y=y)
140 | cv2.imshow("Color Stream", img)
141 |
    def img_work(self, mode, img, x=0, y=0):
        """Annotate *img* and return it.

        :param mode: 'calc' draws a measuring line plus the computed distance;
                     a mode containing 'Measure' first blacks out pixels with
                     no depth data; any other value is drawn as a status banner.
        :param img: BGR image already resized to the display window
        :param x, y: current mouse position (only used by mode 'calc')
        :return: the annotated image (may be a new array in the 'Measure' path)
        """
        if 'Measure' in mode: # show black at NAN
            depth_image = np.asanyarray(self.depth_frame.get_data())
            grey_color = 0
            depth_image_3d = np.dstack(
                (depth_image, depth_image, depth_image)) # depth image is 1 channel, color is 3 channels
            depth_image_3d= cv2.resize(depth_image_3d, self.screen_size())
            # zero depth means "no data" -> paint those pixels black
            img = np.where((depth_image_3d <= 0), grey_color, img)

        font = cv2.FONT_ITALIC
        fontScale = 1
        fontColor = (0, 0, 0)
        lineType = 2

        # Add frame number
        mid_line_frame_num = int(self.width/4)
        rec1, rec2 = (mid_line_frame_num - 50, 20), (mid_line_frame_num + 50, 60)
        text = str(self.color_frame_num)
        bottomLeftCornerOfText = (mid_line_frame_num - 45, 50)
        cv2.rectangle(img, rec1, rec2, (255, 255, 255), -1)
        cv2.putText(img, text, bottomLeftCornerOfText, font, fontScale, fontColor, lineType)

        if mode == 'calc':
            # line from the stored click point to the current mouse position
            pt1, pt2 = (self.ix, self.iy), (x, y)
            ans = self.calculate_distance(x, y)
            # ans is a float in metres, or the string 'NaN' when depth is missing
            text = '{0:.3}'.format(ans)
            bottomLeftCornerOfText = (self.ix + 10, (self.iy - 10))
            rec1, rec2 = (self.ix + 10, (self.iy - 5)), (self.ix + 80, self.iy - 35)

            cv2.line(img, pt1=pt1, pt2=pt2, color=(0, 0, 230), thickness=3)

        else:
            # any other mode string is shown as a centered status banner
            mid_line_screen = int(self.width/2)
            rec1, rec2 = (mid_line_screen-100, 20), (mid_line_screen + 130, 60)
            text = mode
            bottomLeftCornerOfText = (mid_line_screen - 95, 50)

        # white background box plus the text chosen above
        cv2.rectangle(img, rec1, rec2, (255, 255, 255), -1)
        cv2.putText(img, text, bottomLeftCornerOfText, font, fontScale, fontColor, lineType)
        return img
182 |
183 | def calculate_distance(self, x, y):
184 | color_intrin = self.color_intrin
185 | x_ratio, y_ratio = self.x_ratio, self.y_ratio
186 | ix, iy = int(self.ix/x_ratio), int(self.iy/y_ratio)
187 | x, y = int(x/x_ratio), int(y/y_ratio)
188 | udist = self.depth_frame.get_distance(ix, iy)
189 | vdist = self.depth_frame.get_distance(x, y)
190 | if udist ==0.00 or vdist ==0.00:
191 | dist = 'NaN'
192 | else:
193 | point1 = rs.rs2_deproject_pixel_to_point(color_intrin, [ix, iy], udist)
194 | point2 = rs.rs2_deproject_pixel_to_point(color_intrin, [x, y], vdist)
195 | dist = math.sqrt(
196 | math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2) + math.pow(
197 | point1[2] - point2[2], 2))
198 | return dist
199 |
    def frame_getter(self, mode):
        """Advance the playback pipeline until the frame logged for the
        current record index (self.frame_dict) is reached.

        :param mode: 'Color' or 'Depth' -- which aligned stream to fetch and
                     which column of self.frame_dict holds the target number
        :return: the matching frame, or None once the search counter passed
                 10 near-misses without an exact hit
        """
        align_to = rs.stream.color
        align = rs.align(align_to)
        count = 0
        while True:
            self.index_to_obj()
            num = self.frame_dict[mode]  # target frame number from the log
            frames = self.pipeline.wait_for_frames()
            aligned_frames = align.process(frames)

            if mode == 'Color':
                frame = aligned_frames.get_color_frame()
                self.color_frame_num = frame.get_frame_number()
                self.color_intrin = frame.profile.as_video_stream_profile().intrinsics
                self.color_frame = frame
            else:
                frame = aligned_frames.get_depth_frame()
                self.depth_frame_num = frame.get_frame_number()
                self.depth_frame = frame

            frame_num = frame.get_frame_number()

            # counts how often we came within 100 frames of the target
            count = self.count_search(count, frame_num, int(num))
            print('Suchen {}: {}, Jetzt: {}'.format(mode, num, frame_num))

            if abs(int(frame_num) - int(num)) < 5:
                print('match {} {}'.format(mode, frame_num))
                return frame
            elif count > 10:
                # NOTE(review): `num` appears to be a str here (it is
                # concatenated with a str) -- presumably read from the log
                # file; confirm against the frame_list loader
                print(num + ' nicht gefunden, suchen naechste frame')
                return None
231 |
232 |
233 | def frame_to_image(self):
234 | color_image = np.asanyarray(self.color_frame.get_data())
235 | color_cvt = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
236 | color_final = cv2.resize(color_cvt, (self.width, self.height))
237 | return color_final
238 |
239 | def count_search(self, count, now, target):
240 | if abs(now - target) < 100:
241 | count += 1
242 | print('search count:{}'.format(count))
243 | return count
244 |
245 |
def main():
    """Entry point: open a 'Kamera' path in the file explorer or hand the
    jpg path to the Arc_Real measuring GUI.

    Bug fix: ``--path`` is optional (required=False), so args['path'] can be
    None; the original then crashed with ``TypeError: argument of type
    'NoneType'`` on the ``in`` test. Exit with a clear argparse error instead.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--path", required=False)

    args = vars(ap.parse_args())

    jpg_path = args['path']

    if not jpg_path:
        # exits with status 2 and a usage message instead of a traceback
        ap.error('no path given, use -p/--path')

    if 'Kamera' in jpg_path:
        os.startfile(jpg_path)  # Windows-only: open with the associated app
    else:
        Arc_Real(jpg_path)
258 |
259 | if __name__ == '__main__':
260 | main()
261 |
--------------------------------------------------------------------------------
/camera/data_capture_no_gui.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | import numpy as np
3 | import cv2
4 | import serial
5 | import datetime
6 | import time
7 | import os
8 | from math import sin, cos, sqrt, atan2, radians
9 | from getch import pause_exit
10 | from multiprocessing import Process,Value, Array, Pipe
11 |
12 |
def dir_generate(dir_name):
    """
    Create the directory if it does not exist yet (best effort).

    :param dir_name: input complete path of the desired directory
    :return: None

    Bug fix: the original ``try/finally: pass`` swallowed nothing, so a
    concurrent process creating the same directory (or an invalid path)
    crashed the caller; catch OSError to keep creation best-effort.
    """
    dir_name = str(dir_name)
    if not os.path.exists(dir_name):
        try:
            os.mkdir(dir_name)
        except OSError:
            # already created by someone else, or path invalid -- keep going
            pass
24 |
25 |
def port_check(gps_on):
    """
    Probe COM1..COM9 for a serial port that can be opened.

    :param gps_on: mp.Value; set to 3 on failure before the process exits
    :return: the name of the last openable port, e.g. 'COM3'; on failure the
             whole process is terminated via os._exit(0)
    """
    probe = serial.Serial()
    probe.baudrate = 4800
    probe.bytesize = serial.EIGHTBITS
    probe.parity = serial.PARITY_NONE
    probe.timeout = 2
    found = None
    for number in range(1, 10):
        name = 'COM{}'.format(number)
        probe.port = name
        try:
            probe.open()
        except serial.SerialException:
            continue
        # opened fine -- close again and remember the port name
        probe.close()
        found = name
    if found:
        return found
    print ('close other programs using gps or check if the gps is correctly connected')
    gps_on.value = 3
    os._exit(0)
54 |
55 |
56 |
def gps_dis(location_1,location_2):
    """
    Great-circle (haversine) distance between two lon/lat points.
    input tuple/list
    :param location_1: [Lon, Lat] in decimal degrees
    :param location_2: [Lon, Lat] in decimal degrees
    :return: distance in meter
    """
    earth_radius_km = 6373.0

    lon1, lat1 = radians(location_1[0]), radians(location_1[1])
    lon2, lat2 = radians(location_2[0]), radians(location_2[1])

    half_dlat = (lat2 - lat1) / 2
    half_dlon = (lon2 - lon1) / 2

    # haversine formula
    a = sin(half_dlat)**2 + cos(lat1) * cos(lat2) * sin(half_dlon)**2
    central_angle = 2 * atan2(sqrt(a), sqrt(1 - a))

    # convert km to meters
    return earth_radius_km * central_angle * 1000
82 |
83 |
def min2decimal(in_data):
    """
    Convert an NMEA ddmm.mmmm coordinate into decimal degrees.
    :param in_data: lon / lat as string or number in degree-minute form
    :return: value in decimal degrees
    """
    value = float(in_data)
    degrees = int(value / 100)       # leading digits are whole degrees
    minutes = value - degrees * 100  # remainder is minutes
    return degrees + minutes / 60
95 |
96 |
def GPS(Location,gps_on):
    """
    the main function of starting the GPS: parse NMEA sentences forever and
    publish the fix via the shared array and location.csv
    :param Location: mp.Array, receives [lon, lat] in decimal degrees
    :param gps_on: mp.Value, set True once a fix exists, 3 on serial error
    :return:
    """

    print('GPS thread start')

    # Set port
    serialPort = serial.Serial()
    serialPort.port = port_check(gps_on) # Check the available ports, return the valid one
    serialPort.baudrate = 4800
    serialPort.bytesize = serial.EIGHTBITS
    serialPort.parity = serial.PARITY_NONE
    serialPort.timeout = 2
    serialPort.open()
    print ('GPS opened successfully')
    lon, lat = 0, 0
    try:
        while True:
            # NMEA sentences are comma separated byte fields
            line = serialPort.readline()
            data = line.split(b',')
            data = [x.decode("UTF-8") for x in data]
            if data[0] == '$GPRMC':
                if data[2] == "A":  # 'A' = valid fix
                    lat = min2decimal(data[3])
                    lon = min2decimal(data[5])
            elif data[0] == '$GPGGA':
                if data[6] == '1':  # fix quality 1 = GPS fix
                    lon = min2decimal(data[4])
                    lat = min2decimal(data[2])


            if lon ==0 or lat == 0:
                # no fix yet -- retry in a second
                time.sleep(1)

            else:
                #print ('gps ready, current location:{},{}'.format(lon,lat))
                gps_on.value = True  # tell the main process GPS is ready
                Location[:] = [lon,lat]
                # mirror the latest position to disk for external tools
                with open('location.csv', 'w') as gps:
                    gps.write('Lat,Lon\n')
                    gps.write('{},{}'.format(lat,lon))


    except serial.SerialException:
        print ('Error opening GPS')
        gps_on.value = 3  # 3 = unrecoverable GPS error, shuts the program down
    finally:
        serialPort.close()
        print('GPS finish')
150 |
151 |
def Camera(child_conn, take_pic, Frame_num, camera_on, bag):
    """
    Main camera running: streams preview images through the pipe and records
    single framesets into the bag file on request
    :param child_conn: source of image, sending to openCV
    :param take_pic: mp.Value, receive True will take one picture, and send back False when done
    :param Frame_num: mp.Array, frame number of the picture taken
    :param camera_on: mp.Value, the status of camera (0 ends the loop)
    :param bag: the number of the current recorded file
    :return:
    """
    print('camera start')
    bag_name = './bag/{}.bag'.format(bag)
    camera_on.value = True
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 15)
    config.enable_stream(rs.stream.color, 1920, 1080, rs.format.rgb8, 15)
    config.enable_record_to_file(bag_name)
    profile = pipeline.start(config)

    device = profile.get_device() # get record device
    recorder = device.as_recorder()
    recorder.pause() # and pause it, so only requested framesets get written

    # get sensor and set to high density
    depth_sensor = device.first_depth_sensor()
    #depth_sensor.set_option(rs.option.visual_preset, 4)
    # dev_range = depth_sensor.get_option_range(rs.option.visual_preset)
    #preset_name = depth_sensor.get_option_value_description(rs.option.visual_preset, 4)
    #print (preset_name)
    # set frame queue size to max
    sensor = profile.get_device().query_sensors()
    for x in sensor:
        x.set_option(rs.option.frames_queue_size, 32)
    # set auto exposure but process data first
    color_sensor = profile.get_device().query_sensors()[1]
    color_sensor.set_option(rs.option.auto_exposure_priority, True)

    try:
        while True:
            # push a (color, colorized-depth) preview pair to the GUI process
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            depth_color_frame = rs.colorizer().colorize(depth_frame)
            depth_image = np.asanyarray(depth_color_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            child_conn.send((color_image, depth_image))

            if take_pic.value == 1:
                # briefly resume the recorder so one frameset lands in the bag
                recorder.resume()
                frames = pipeline.wait_for_frames()
                depth_frame = frames.get_depth_frame()
                color_frame = frames.get_color_frame()
                var = rs.frame.get_frame_number(color_frame)
                vard = rs.frame.get_frame_number(depth_frame)
                Frame_num[:] = [var,vard]
                time.sleep(0.05)
                recorder.pause()
                take_pic.value = False  # signal the GUI the photo is done

            elif camera_on.value == 0:
                # GUI requested shutdown
                child_conn.close()
                break

    except RuntimeError:
        print ('run')

    finally:
        print('pipeline closed')
        pipeline.stop()
222 |
223 |
def Show_Image(bag, parent_conn, take_pic, Frame_num, camera_on, camera_repeat, gps_on, Location):
    """
    The GUI for the program, using openCV to show frames and read key commands
    :param bag: number of the recorded file, used for the log file name
    :param parent_conn: receiver side of image, to show on openCV, the recorded bag will still in Camera
    :param take_pic: send True to let Camera take picture
    :param Frame_num: receive frame number
    :param camera_on: receive camera situation
    :param camera_repeat: mp.Value, determine if automatic start the next record
    :param gps_on: mp.Value status of gps (0 off, 1/True on, 3 error)
    :param Location: the current Lon, Lat
    :return:

    Bug fix: the original tested ``gps_on is False`` which compares the
    mp.Value proxy object itself and can never be True; it now checks
    ``gps_on.value == 0``. Also removed the unused ``foto_frame`` local.
    """
    Pause = False
    i = 1  # running photo number within this recording
    foto_location = (0, 0)  # where the last photo was taken

    try:
        while True:
            (lon, lat) = Location[:]
            current_location = (lon, lat)
            present = datetime.datetime.now()
            date = '{},{},{},{}'.format(present.day, present.month, present.year, present.time())
            local_take_pic = False

            # build the preview: color on top, colorized depth below
            color_image, depth_image = parent_conn.recv()
            depth_colormap_resize = cv2.resize(depth_image, (424, 240))
            color_cvt = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
            color_cvt_2 = cv2.resize(color_cvt, (424, 318))
            images = np.vstack((color_cvt_2, depth_colormap_resize))
            cv2.namedWindow('Color', cv2.WINDOW_AUTOSIZE)
            cv2.setWindowProperty('Color', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
            cv2.setWindowProperty('Color', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)

            if Pause is True:
                # red "Pause" banner over the preview
                cv2.rectangle(images, (111,219), (339,311), (0, 0, 255), -1)
                font = cv2.FONT_HERSHEY_SIMPLEX
                bottomLeftCornerOfText = (125,290)
                fontScale = 2
                fontColor = (0, 0, 0)
                lineType = 4
                cv2.putText(images, 'Pause', bottomLeftCornerOfText, font, fontScale, fontColor, lineType)

            cv2.imshow('Color', images)
            key = cv2.waitKeyEx(1)
            # skip while a capture is in progress or we have not moved yet
            if take_pic.value == 1 or current_location == foto_location:
                continue

            if Pause is False:
                # auto capture roughly every 15 m of movement
                if gps_dis(current_location, foto_location) > 15:
                    local_take_pic = True

            if key == 98 or key == 32:  # 'b' or space: manual capture
                local_take_pic = True

            if local_take_pic == True:
                take_pic.value = True
                time.sleep(0.1)  # give Camera time to publish the frame numbers
                (color_frame_num, depth_frame_num) = Frame_num[:]
                logmsg = '{},{},{},{},{},{}\n'.format(i, color_frame_num, depth_frame_num, lon, lat,date)
                print('Foto {} gemacht um {:.03},{:.04}'.format(i,lon,lat))
                with open('./foto_log/{}.txt'.format(bag), 'a') as logfile:
                    logfile.write(logmsg)
                with open('foto_location.csv', 'a') as record:
                    record.write(logmsg)
                foto_location = (lon, lat)
                i += 1
            if key & 0xFF == ord('q') or key == 27:  # 'q' or ESC: quit everything
                camera_on.value = False
                gps_on.value = False
                camera_repeat.value = False
                print('Camera finish\n')
            elif key == 114 or key == 2228224:  # 'r' or PageDown: restart recording
                camera_on.value = False
                camera_repeat.value = True
                print ('Camera restart')
            elif gps_on.value == 0:
                # GPS switched off -> do not start another round
                # (was `gps_on is False`, comparing the proxy object itself)
                camera_repeat.value = False
            elif cv2.waitKey(1) & 0xFF == ord('p') or key == 2162688:  # 'p' or PageUp
                if Pause is False:
                    print ('pause pressed')
                    Pause = True
                elif Pause is True:
                    print ('restart')
                    Pause = False
    except EOFError:
        # Camera closed its end of the pipe
        pass
    finally:
        print ('Image thread ended')
314 |
315 |
def bag_num():
    """
    Generate the number of record file MMDD_NNN, choosing the first NNN for
    which ./bag/MMDD_NNN.bag does not exist yet
    :return: the chosen file name without extension, e.g. '0917_006'
    """
    now = datetime.datetime.now()
    time.sleep(1)

    num = 1
    while True:
        file_name = '{:02d}{:02d}_{:03d}'.format(now.month, now.day, num)
        if os.path.isfile('./bag/{}.bag'.format(file_name)):
            num += 1
            continue
        print ('current filename:{}'.format(file_name))
        return file_name
338 |
339 |
def main():
    """Entry point: start the GPS process, then run recording rounds
    (one Show_Image process + one Camera loop per bag) until the user quits."""
    # Create Folders for Data
    folder_list = ('bag','foto_log')
    for folder in folder_list:
        dir_generate(folder)
    # Create Variables between Processes
    Location = Array('d',[0,0])   # [lon, lat] written by the GPS process
    Frame_num = Array('i',[0,0])  # [color, depth] frame numbers of last photo

    take_pic = Value('i',False)      # 1 -> Camera should capture one frameset
    camera_on = Value('i',True)      # 0 -> Camera loop should stop
    camera_repeat = Value('i',True)  # 1 -> start another recording round
    gps_on = Value('i',False)        # 0 waiting, True/1 ready, 3 error

    # Start GPS process
    gps_process = Process(target=GPS, args=(Location,gps_on,))
    gps_process.start()

    print('Program Start')
    while camera_repeat.value:
        if gps_on.value == 0:
            # GPS has no fix yet -- wait
            time.sleep(1)
            continue
        elif gps_on.value == 3:
            # GPS failed: wait for a keypress, then quit
            pause_exit()
            break

        else:
            parent_conn, child_conn = Pipe()
            bag = bag_num()
            img_process = Process(target=Show_Image,
                                  args=(bag,parent_conn, take_pic, Frame_num, camera_on, camera_repeat, gps_on, Location,))
            img_process.start()
            # Camera runs in this process and blocks until the round ends
            Camera(child_conn,take_pic,Frame_num,camera_on,bag)

    gps_process.terminate()
376 |
377 | if __name__ == '__main__':
378 | main()
--------------------------------------------------------------------------------
/camera/qt gui/cmd_class.py:
--------------------------------------------------------------------------------
1 | import multiprocessing as mp
2 | import pyrealsense2 as rs
3 | import numpy as np
4 | import cv2
5 | import serial
6 | import datetime
7 | import time
8 | import os, sys
9 | from math import sin, cos, sqrt, atan2, radians
10 | import threading
11 |
12 |
def dir_generate(dir_name):
    """
    Create the directory if it does not exist yet (best effort).

    :param dir_name: input complete path of the desired directory
    :return: None

    Bug fix: the original ``try/finally: pass`` swallowed nothing, so a
    concurrent mkdir of the same path (or an invalid path) crashed the
    caller; catch OSError to keep creation best-effort.
    """
    dir_name = str(dir_name)
    if not os.path.exists(dir_name):
        try:
            os.mkdir(dir_name)
        except OSError:
            # already created by someone else, or path invalid -- keep going
            pass
24 |
25 |
def port_check(gps_on):
    """
    Probe Windows COM0..COM9 and Linux /dev/ttyUSB0..4 for an openable port.

    :param gps_on: mp.Value; set to 3 when no port can be opened
    :return: the last openable port name, or None on failure
    """
    probe = serial.Serial()
    probe.baudrate = 4800
    probe.bytesize = serial.EIGHTBITS
    probe.parity = serial.PARITY_NONE
    probe.timeout = 2
    candidates = ['COM{}'.format(n) for n in range(10)]
    candidates += ['/dev/ttyUSB{}'.format(n) for n in range(5)]
    found = None
    for name in candidates:
        probe.port = name
        try:
            probe.open()
        except serial.SerialException:
            continue
        # opened fine -- close again and remember the port name
        probe.close()
        found = name
    if found:
        return found
    print('close other programs using gps or check if the gps is correctly connected')
    gps_on.value = 3
54 |
55 |
def gps_dis(location_1, location_2):
    """
    Great-circle (haversine) distance between two lon/lat points.
    input tuple/list
    :param location_1: [Lon, Lat] in decimal degrees
    :param location_2: [Lon, Lat] in decimal degrees
    :return: distance in meter
    """
    earth_radius_km = 6373.0

    lon1, lat1 = radians(location_1[0]), radians(location_1[1])
    lon2, lat2 = radians(location_2[0]), radians(location_2[1])

    half_dlat = (lat2 - lat1) / 2
    half_dlon = (lon2 - lon1) / 2

    # haversine formula
    a = sin(half_dlat) ** 2 + cos(lat1) * cos(lat2) * sin(half_dlon) ** 2
    central_angle = 2 * atan2(sqrt(a), sqrt(1 - a))

    # convert km to meters
    return earth_radius_km * central_angle * 1000
81 |
82 |
def min2decimal(in_data):
    """
    Convert an NMEA ddmm.mmmm coordinate into decimal degrees.
    :param in_data: lon / lat as string or number in degree-minute form
    :return: value in decimal degrees
    """
    value = float(in_data)
    degrees = int(value / 100)       # leading digits are whole degrees
    minutes = value - degrees * 100  # remainder is minutes
    return degrees + minutes / 60
94 |
95 |
def gps_information(port):
    """Block until a valid NMEA fix is parsed from *port*.

    Reads $GPRMC / $GPGGA sentences until both longitude and latitude are
    non-zero, sleeping 1 s between reads.

    :param port: an open serial.Serial-like object with readline()
    :return: (lon, lat) in decimal degrees, or (0, 0) when parsing failed

    Bug fix: the bare ``except:`` also swallowed KeyboardInterrupt and
    SystemExit, making the GPS process unkillable from the console; narrowed
    to ``except Exception`` while keeping the best-effort behaviour.
    """
    lon, lat = 0, 0
    try:
        while lon == 0 or lat == 0:
            line = port.readline()
            data = line.split(b',')
            data = [x.decode("UTF-8") for x in data]
            if data[0] == '$GPRMC':
                if data[2] == "A":  # 'A' = valid fix
                    lat = min2decimal(data[3])
                    lon = min2decimal(data[5])
            elif data[0] == '$GPGGA':
                if data[6] == '1':  # fix quality 1 = GPS fix
                    lon = min2decimal(data[4])
                    lat = min2decimal(data[2])
            time.sleep(1)
    except Exception:
        # malformed/truncated sentence -- report and return what we have
        print('decode error')

    return lon, lat
120 |
121 |
def GPS(Location, gps_on, root):
    """
    Serial-GPS worker: keeps Location updated and mirrors the fix to
    {root}location.csv until gps_on is set to 99.

    :param Location: mp.Array fot longitude, latitude
    :param gps_on: gps status value; as used here: 0 resting, 2 waiting for
                   the first fix, 1 running, 3 error, 99 shutdown request
    :param root: root dir for linux at /home/pi
    :return:
    """
    print('GPS thread start')
    # Set port
    serialPort = serial.Serial()
    serialPort.port = port_check(gps_on)  # Check the available ports, return the valid one
    serialPort.baudrate = 4800
    serialPort.bytesize = serial.EIGHTBITS
    serialPort.parity = serial.PARITY_NONE
    serialPort.timeout = 2
    serialPort.open()
    print('GPS opened successfully')
    gps_on.value = 2  # 2 = waiting for the first fix
    lon, lat = gps_information(serialPort)
    gps_on.value = 1  # 1 = fix acquired, running
    try:
        while gps_on.value != 99:  # 99 = shutdown request from the GUI
            lon, lat = gps_information(serialPort)
            Location[:] = [lon, lat]
            # mirror the latest position to disk for external tools
            with open('{}location.csv'.format(root), 'w') as gps:
                gps.write('Lat,Lon\n')
                gps.write('{},{}'.format(lat, lon))
            # print(lon, lat)

    except serial.SerialException:
        print('Error opening GPS')
        gps_on.value = 3  # 3 = unrecoverable GPS error
    finally:
        serialPort.close()
        print('GPS finish')
        gps_on.value = 0
159 |
160 |
def Camera(child_conn, take_pic, frame_num, camera_status, bag):
    """
    RealSense worker: streams a stacked color/depth preview through the pipe
    and records single framesets into *bag* on request.

    :param child_conn: mp.Pipe for image
    :param take_pic: take pic command, 0 for rest, 1 for take one pic, after taken is 2, log file will turn back to 0
    :param frame_num: mp.Array for frame number
    :param camera_status: 0 for rest, 1 for running, 99 for end (98 is set on exit)
    :param bag: bag path /home/pi/bag
    :return:
    """
    print('camera start')
    try:
        pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 15)
        config.enable_stream(rs.stream.color, 1920, 1080, rs.format.rgb8, 15)
        config.enable_record_to_file(bag)
        profile = pipeline.start(config)

        device = profile.get_device()  # get record device
        recorder = device.as_recorder()
        recorder.pause()  # and pause it, so only requested framesets get written

        # set frame queue size to max
        sensor = profile.get_device().query_sensors()
        for x in sensor:
            x.set_option(rs.option.frames_queue_size, 32)
        # set auto exposure but process data first
        color_sensor = profile.get_device().query_sensors()[1]
        color_sensor.set_option(rs.option.auto_exposure_priority, True)
        camera_status.value = 1
        while camera_status.value != 99:
            # build the preview image: color on top, colorized depth below
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            depth_color_frame = rs.colorizer().colorize(depth_frame)
            depth_image = np.asanyarray(depth_color_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            depth_colormap_resize = cv2.resize(depth_image, (400, 250))
            color_cvt = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
            color_cvt_2 = cv2.resize(color_cvt, (400, 250))
            img = np.vstack((color_cvt_2, depth_colormap_resize))
            child_conn.send(img)

            if take_pic.value == 1:
                # briefly resume the recorder so one frameset lands in the bag
                recorder.resume()
                frames = pipeline.wait_for_frames()
                depth_frame = frames.get_depth_frame()
                color_frame = frames.get_color_frame()
                var = rs.frame.get_frame_number(color_frame)
                vard = rs.frame.get_frame_number(depth_frame)
                frame_num[:] = [var, vard]
                time.sleep(0.05)
                recorder.pause()
                print('taken', frame_num[:])
                take_pic.value = 2  # 2 = photo done, logger resets it to 0

        #child_conn.close()
        pipeline.stop()

    except RuntimeError:
        print('run')

    finally:
        print('pipeline closed')
        camera_status.value = 98  # 98 = pipeline closed, lets command_receiver exit
        print('camera value', camera_status.value)
228 |
229 |
def bag_num():
    """
    Generate the number of record file MMDD_NNN, choosing the first NNN whose
    bag file does not exist yet. On Linux the bag folder is /home/pi/RR/bag.
    :return: the chosen file name without extension, e.g. '0917_006'
    """
    now = datetime.datetime.now()
    time.sleep(1)

    num = 1
    while True:
        file_name = '{:02d}{:02d}_{:03d}'.format(now.month, now.day, num)
        bag_name = 'bag/{}.bag'.format(file_name)
        if sys.platform == 'linux':
            bag_name = "/home/pi/RR/" + bag_name
        if os.path.isfile(bag_name):
            num += 1
            continue
        print('current filename:{}'.format(file_name))
        return file_name
254 |
255 |
class RScam:
    """Facade used by the Qt GUI: owns the shared multiprocessing state and
    drives the GPS, Camera and image-receiver workers."""

    def __init__(self):
        # Create Folders for Data
        if sys.platform == "linux":
            self.root_dir = '/home/pi/RR/'
        else:
            self.root_dir = ''

        folder_list = ('bag', 'foto_log')

        for folder in folder_list:
            dir_generate(self.root_dir + folder)
        # Create Variables between Processes
        self.Location = mp.Array('d', [0, 0])   # [lon, lat] written by GPS process
        self.Frame_num = mp.Array('i', [0, 0])  # [color, depth] frame numbers of last photo

        self.take_pic = mp.Value('i', 0)        # 0 rest, 1 capture requested, 2 captured
        self.camera_command = mp.Value('i', 0)  # 0 rest, 1 running, 99 stop, 98 stopped
        self.gps_status = mp.Value('i', 0)      # 0 off, 1 running, 2 searching, 3 error
        jpg_path = "/home/pi/RR/jpg.jpeg"
        # placeholder image shown before the camera delivers frames
        if os.path.isfile(jpg_path):
            self.img = cv2.imread(jpg_path)
        else:
            self.img = cv2.imread('img/1.jpg')

        self.auto = False      # automatic capture by travelled distance
        self.restart = True    # keep main_loop running
        self.command = None    # one-shot command string from the GUI
        self.distance = 15     # auto-capture distance in meters
        self.msg = 'waiting'   # status text shown in the GUI

    def start_gps(self):
        """Start the GPS worker process (feeds self.Location / self.gps_status)."""
        # Start GPS process
        gps_process = mp.Process(target=GPS, args=(self.Location, self.gps_status, self.root_dir,))
        gps_process.start()

    def main_loop(self):
        """Run recording rounds until self.restart is cleared: spawn one
        Camera process per round and handle its commands and photo logging."""
        parent_conn, child_conn = mp.Pipe()
        self.img_thread_status = True
        image_thread = threading.Thread(target=self.image_receiver, args=(parent_conn,))
        image_thread.start()
        while self.restart:
            self.msg = 'gps status: {}'.format(self.gps_status.value)
            if self.gps_status.value == 3:
                self.msg = 'error with gps'
                break
            elif self.gps_status.value == 2:
                # still waiting for the first GPS fix
                time.sleep(1)
            elif self.gps_status.value == 1 and self.camera_command.value == 0:
                bag = bag_num()
                bag_name = "{}bag/{}.bag".format(self.root_dir, bag)
                cam_process = mp.Process(target=Camera, args=(child_conn, self.take_pic,
                                                              self.Frame_num, self.camera_command, bag_name))
                cam_process.start()
                self.command_receiver(bag)  # blocks until this round ends
                self.msg = 'end one round'
                #print('end one round')
                self.camera_command.value = 0
        self.gps_status.value = 99  # ask the GPS process to stop
        self.img_thread_status = False
        self.msg = "waiting"

    def image_receiver(self, parent_conn):
        """Continuously pull preview images from the camera pipe into self.img."""
        while self.img_thread_status:
            try:
                self.img = parent_conn.recv()
            except EOFError:
                print('EOF')

        # fall back to the placeholder once the camera stops
        self.img = cv2.imread("img/1.jpg")
        print("img thread closed")

    def command_receiver(self, bag):
        """Per-round command/photo loop: log finished photos, execute one-shot
        GUI commands and trigger auto captures until the camera reports 98."""
        i = 1  # running photo number within this round
        foto_location = (0, 0)
        while self.camera_command.value != 98:
            #print('msg', self.msg)
            (lon, lat) = self.Location[:]
            current_location = (lon, lat)
            present = datetime.datetime.now()
            date = '{},{},{},{}'.format(present.day, present.month, present.year, present.time())
            local_take_pic = False

            if self.take_pic.value == 2:
                # camera finished a capture -> append the log entry
                color_frame_num, depth_frame_num = self.Frame_num[:]
                print(color_frame_num, depth_frame_num)
                logmsg = '{},{},{},{},{},{}\n'.format(i, color_frame_num, depth_frame_num, lon, lat, date)
                #print('Foto {} gemacht um {:.03},{:.04}'.format(i, lon, lat))
                self.msg = 'Foto {} gemacht um {:.03},{:.04}'.format(i, lon, lat)
                with open('{}foto_log/{}.txt'.format(self.root_dir, bag), 'a') as logfile:
                    logfile.write(logmsg)
                with open('{}foto_location.csv'.format(self.root_dir), 'a') as record:
                    record.write(logmsg)
                foto_location = (lon, lat)
                i += 1
                self.take_pic.value = 0

            # skip while a capture is pending or we have not moved yet
            if self.take_pic.value in (1, 2) or current_location == foto_location:
                continue

            cmd = self.command

            if cmd == 'auto':
                self.auto = True
            elif cmd == "pause":
                self.auto = False
            elif cmd == "shot":
                print('take manual')
                local_take_pic = True
            elif cmd == "restart" or cmd == "quit":
                # 99 tells the Camera process to shut down
                self.camera_command.value = 99
                self.msg = cmd
                print("close main", self.msg)

            self.command = None  # commands are one-shot

            if self.auto and gps_dis(current_location, foto_location) > self.distance:
                local_take_pic = True

            if local_take_pic:
                self.take_pic.value = 1

        self.msg = 'main closed'
        print("main closed")
        self.camera_command.value = 0
381 |
382 |
383 | if __name__ == '__main__':
384 | pass
--------------------------------------------------------------------------------
/camera/qt gui/img/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soarwing52/RealsensePython/d4f5cf5c0be9ec7d84e9d759886144c8127d445d/camera/qt gui/img/1.jpg
--------------------------------------------------------------------------------
/camera/qt gui/img/intel.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soarwing52/RealsensePython/d4f5cf5c0be9ec7d84e9d759886144c8127d445d/camera/qt gui/img/intel.ico
--------------------------------------------------------------------------------
/camera/qt gui/img/intel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soarwing52/RealsensePython/d4f5cf5c0be9ec7d84e9d759886144c8127d445d/camera/qt gui/img/intel.png
--------------------------------------------------------------------------------
/camera/qt gui/qtcam.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soarwing52/RealsensePython/d4f5cf5c0be9ec7d84e9d759886144c8127d445d/camera/qt gui/qtcam.exe
--------------------------------------------------------------------------------
/camera/qt gui/qtcam.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 | import multiprocessing as mp
4 | from PyQt5 import QtWidgets, QtCore, QtGui
5 | from PyQt5.QtCore import QThread, pyqtSignal
6 | from PyQt5.QtGui import QImage, QPixmap
7 | from temp import Ui_MainWindow
8 | from cmd_class import RScam
9 | import cv2
10 | import threading
11 |
12 | a = RScam()
13 |
14 |
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """Qt main window wiring the generated UI to the module-level RScam `a`."""

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent=parent)
        self.setupUi(self)
        self.setWindowIcon(QtGui.QIcon('img/intel.png'))

        # refresh the camera preview at ~15 fps
        self.timer_camera = QtCore.QTimer()
        self.timer_camera.timeout.connect(self.show_image)
        self.timer_camera.start(int(1000 / 15))

        # poll the status message once per second
        self.timer_msg = QtCore.QTimer()
        self.timer_msg.timeout.connect(self.get_msg)
        self.timer_msg.start(1000)

        self.btn_start.clicked.connect(lambda : self.start())
        self.btn_restart.clicked.connect(lambda: self.restart())
        self.btn_photo.clicked.connect(lambda: self.foto())
        self.btn_quit.clicked.connect(lambda: self.quit_btn())

        self.spinBox.setValue(a.distance)
        self.checkBox.stateChanged.connect(lambda : self.auto_change())
        self.spinBox.valueChanged.connect(lambda : self.dis_change())


    def keyPressEvent(self, event):
        """Keyboard shortcuts; the integers are raw Qt key codes."""
        key = event.key()
        if key == 16777238:  # Qt.Key_PageUp: toggle auto capture
            if a.auto is True:
                self.checkBox.setChecked(False)
            else:
                self.checkBox.setChecked(True)
            print(a.auto)
        elif key == 16777239:  # Qt.Key_PageDown: restart recording
            self.restart()
        elif key == 66:  # Qt.Key_B: take a photo
            self.foto()

    def start(self):
        """Start the GPS (if idle) and the RScam main loop in a thread.

        :return: a short status string describing what happened
        """
        if a.gps_status.value == 0:
            a.start_gps()
            a.restart = True
            t = threading.Thread(target=a.main_loop)
            t.start()
            print("start loop")
            return "started camera"
        elif a.gps_status.value == 2:
            print("wait for signal")
            return "waiting for signal"
        else:
            print(a.gps_status.value, a.camera_command.value)
            return "start but waiting GPS"

    def show_image(self):
        """Timer slot: copy the latest RScam image into the preview label."""
        try:
            self.img = a.img
            self.img = cv2.resize(self.img, (400, 500))
            height, width, channel = self.img.shape
        except:
            # no/invalid image from the camera yet -> show the default
            print('no image, get default')
            self.img = cv2.imread('1.jpg')
            self.img = cv2.resize(self.img, (400, 500))
            height, width, channel = self.img.shape

        bytesPerline = 3 * width
        self.qImg = QImage(self.img.data, width, height, bytesPerline, QImage.Format_RGB888).rgbSwapped()
        # render the QImage into the preview label
        self.label.setPixmap(QPixmap.fromImage(self.qImg))

    def restart(self):
        """Ask RScam to finish the current bag and start a new one."""
        print("restart")
        a.command = "restart"

    def foto(self):
        """Request a single manual photo."""
        print("foto")
        a.command = 'shot'

    def quit_btn(self):
        """Stop the RScam main loop for good."""
        print("quit")
        a.restart = False
        a.command = 'quit'

    def auto_change(self):
        """Checkbox slot: sync the auto-capture flag and the label text."""
        if self.checkBox.isChecked() is False:
            print('no')
            self.checkBox.setText('Auto off')
            self.checkBox.setChecked(False)
            a.auto = False
        else:
            print('checked')
            self.checkBox.setText('Auto on')
            self.checkBox.setChecked(True)
            a.auto = True

    def dis_change(self):
        """Spinbox slot: update the auto-capture distance (meters)."""
        a.distance = self.spinBox.value()
        print(a.distance)

    def get_msg(self):
        """Timer slot: mirror RScam's status text into the status bar."""
        self.statusbar.showMessage(a.msg)
114 |
115 |
116 |
class msgThread(QThread):
    """Background thread that forwards the worker's status text to the
    GUI through the ``msg`` signal."""
    msg = pyqtSignal(str)

    def __init__(self, parent=None):
        # Fix: the original defined ``__int__`` (typo), so this
        # initializer was never actually called by Python.
        super(msgThread, self).__init__(parent)

    def run(self):
        """Emit the current status string forever, ~5x per second."""
        while True:
            self.msg.emit(a.msg)
            # Fix: the original loop emitted as fast as possible,
            # pegging a CPU core and flooding the Qt event queue.
            self.msleep(200)
127 |
if __name__ == "__main__":
    # freeze_support() is required for multiprocessing inside a frozen
    # (PyInstaller) executable such as qtcam.exe.
    mp.freeze_support()
    application = QtWidgets.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(application.exec_())
--------------------------------------------------------------------------------
/camera/qt gui/temp.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'cam.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.13.1
6 | #
7 | # WARNING! All changes made in this file will be lost!
8 |
9 |
10 | from PyQt5 import QtCore, QtGui, QtWidgets
11 |
12 |
class Ui_MainWindow(object):
    """Auto-generated (pyuic5, from 'cam.ui') layout for the camera window.

    NOTE(review): generated code -- regenerate from the .ui file rather
    than hand-editing (the file header warns changes will be lost).
    """

    def setupUi(self, MainWindow):
        """Create all widgets of the camera window and lay them out."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(428, 675)
        font = QtGui.QFont()
        font.setPointSize(14)
        MainWindow.setFont(font)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # 3x2 grid of controls at the top: start/restart, photo/quit,
        # distance spinbox / auto checkbox.
        self.gridLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 10, 401, 191))
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.btn_start = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.btn_start.setObjectName("btn_start")
        self.gridLayout.addWidget(self.btn_start, 0, 0, 1, 1)
        self.btn_photo = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.btn_photo.setObjectName("btn_photo")
        self.gridLayout.addWidget(self.btn_photo, 1, 0, 1, 1)
        self.btn_restart = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.btn_restart.setObjectName("btn_restart")
        self.gridLayout.addWidget(self.btn_restart, 0, 1, 1, 1)
        self.btn_quit = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.btn_quit.setObjectName("btn_quit")
        self.gridLayout.addWidget(self.btn_quit, 1, 1, 1, 1)
        # Photo-distance spinbox (default 15, overwritten from a.distance
        # by the MainWindow subclass at runtime).
        self.spinBox = QtWidgets.QSpinBox(self.gridLayoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.spinBox.sizePolicy().hasHeightForWidth())
        self.spinBox.setSizePolicy(sizePolicy)
        self.spinBox.setMinimumSize(QtCore.QSize(0, 50))
        self.spinBox.setProperty("value", 15)
        self.spinBox.setObjectName("spinBox")
        self.gridLayout.addWidget(self.spinBox, 2, 0, 1, 1)
        self.checkBox = QtWidgets.QCheckBox(self.gridLayoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Ignored)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.checkBox.sizePolicy().hasHeightForWidth())
        self.checkBox.setSizePolicy(sizePolicy)
        self.checkBox.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.checkBox.setChecked(False)
        self.checkBox.setTristate(False)
        self.checkBox.setObjectName("checkBox")
        self.gridLayout.addWidget(self.checkBox, 2, 1, 1, 1)
        # Camera preview area (filled by MainWindow.show_image()).
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(10, 210, 401, 441))
        self.label.setText("")
        self.label.setObjectName("label")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (kept separate for Qt translation)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "D435"))
        self.btn_start.setText(_translate("MainWindow", "Start"))
        self.btn_photo.setText(_translate("MainWindow", "Foto"))
        self.btn_restart.setText(_translate("MainWindow", "Restart"))
        self.btn_quit.setText(_translate("MainWindow", "Quit"))
        self.checkBox.setText(_translate("MainWindow", "Auto off"))
81 |
--------------------------------------------------------------------------------
/examples/bag/0917_006.bag:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:4cad7b10997096dac38a15f8f22bcae3f96a7b145d5ea18cb17ddcc01c080027
3 | size 29586491
4 |
--------------------------------------------------------------------------------
/examples/bag/0917_007.bag:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:41b5867595cbff2d1c891918f781a941a10e8863ba802b672efacdd3fdb8dd50
3 | size 36149780
4 |
--------------------------------------------------------------------------------
/examples/bag/0917_008.bag:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:ce3e0be5c06815dcbaae0abfa534f23907f8ec7a0604aee6339dc1f97e8fb678
3 | size 77956479
4 |
--------------------------------------------------------------------------------
/examples/bag/0917_009.bag:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:5283245f8d269eefa10e3552219545386f25306c9c8817d7b8c9a34e4199f82c
3 | size 38030270
4 |
--------------------------------------------------------------------------------
/examples/bag/0917_010.bag:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:d2750ddc88bf52c4f956953d12030f0312c359bc14bc1eef21985c644c46404f
3 | size 68881561
4 |
--------------------------------------------------------------------------------
/examples/foto_log/0917_006.txt:
--------------------------------------------------------------------------------
1 | 1,7438,7185,7.887318333333333,51.535783333333335,17,9,2019,09:30:27.007127
2 | 2,3,4,7.887463333333333,51.53557166666666,17,9,2019,09:30:37.635633
3 | 3,119,121,7.887505000000001,51.53531833333333,17,9,2019,09:30:43.570878
4 | 4,207,209,7.8875399999999996,51.535065,17,9,2019,09:30:50.636491
5 | 5,314,316,7.887566666666667,51.53483500000001,17,9,2019,09:30:57.578048
6 | 6,417,419,7.887596666666667,51.534580000000005,17,9,2019,09:31:05.657929
7 | 7,538,541,7.887621666666666,51.534349999999996,17,9,2019,09:31:12.625715
8 | 8,642,645,7.887653333333333,51.53409666666666,17,9,2019,09:31:20.620529
9 |
--------------------------------------------------------------------------------
/examples/foto_log/0917_007.txt:
--------------------------------------------------------------------------------
1 | 1,762,765,7.8854750000000005,51.538705,17,9,2019,09:34:10.533987
2 | 2,2,2,7.88587,51.538734999999996,17,9,2019,09:34:28.625544
3 | 3,192,189,7.886241666666666,51.538775,17,9,2019,09:34:35.637720
4 | 4,297,294,7.8866249999999996,51.53880666666667,17,9,2019,09:34:43.580824
5 | 5,416,413,7.887006666666667,51.53883166666666,17,9,2019,09:34:51.595298
6 | 6,537,534,7.887401666666666,51.53886333333333,17,9,2019,09:34:59.582895
7 | 7,656,654,7.887771666666667,51.53890666666666,17,9,2019,09:35:07.627775
8 | 8,776,774,7.888168333333335,51.538955,17,9,2019,09:35:15.620082
9 | 9,897,895,7.8885683333333345,51.53899666666666,17,9,2019,09:35:24.692446
10 | 10,1028,1011,7.8889700000000005,51.53904666666667,17,9,2019,09:35:32.573204
11 | 11,1140,1121,7.889323333333334,51.53910166666666,17,9,2019,09:35:39.634703
12 | 12,1246,1227,7.889683333333333,51.53912833333334,17,9,2019,09:35:46.627150
13 |
--------------------------------------------------------------------------------
/examples/foto_log/0917_008.txt:
--------------------------------------------------------------------------------
1 | 1,0,0,7.890798333333334,51.52924500000001,17,9,2019,09:40:55.586933
2 | 2,3,4,7.890821666666667,51.52947000000001,17,9,2019,09:41:46.674105
3 | 3,735,704,7.890853333333332,51.52972499999999,17,9,2019,09:41:51.643906
4 | 4,808,763,7.890883333333333,51.52999499999999,17,9,2019,09:41:56.621535
5 | 5,874,827,7.890906666666666,51.53026,17,9,2019,09:42:00.632488
6 | 6,934,887,7.890928333333333,51.53051666666667,17,9,2019,09:42:04.689596
7 | 7,994,947,7.890944999999999,51.53076666666666,17,9,2019,09:42:08.683390
8 | 8,1054,1007,7.8909633333333336,51.531016666666666,17,9,2019,09:42:12.610645
9 | 9,1113,1066,7.89097,51.53126333333333,17,9,2019,09:42:16.933595
10 | 10,1177,1117,7.8909666666666665,51.53149833333334,17,9,2019,09:42:20.676740
11 | 11,1225,1162,7.89096,51.53173333333334,17,9,2019,09:42:24.603987
12 | 12,1284,1221,7.890953333333332,51.531973333333326,17,9,2019,09:42:28.689688
13 | 13,1344,1281,7.890948333333334,51.532221666666665,17,9,2019,09:42:34.333712
14 | 14,1429,1366,7.890946666666667,51.53222000000001,17,9,2019,09:42:36.514398
15 | 15,1461,1398,7.891125,51.531616666666665,17,9,2019,09:43:39.961702
16 | 16,2291,2216,7.891428333333333,51.53148666666666,17,9,2019,09:43:51.644077
17 | 17,2465,2390,7.891751666666666,51.531355,17,9,2019,09:43:56.609966
18 | 18,2540,2466,7.892144999999999,51.531346666666664,17,9,2019,09:44:03.641623
19 | 19,2646,2572,7.892394999999999,51.531560000000006,17,9,2019,09:44:08.639207
20 |
--------------------------------------------------------------------------------
/examples/foto_log/0917_009.txt:
--------------------------------------------------------------------------------
1 | 1,2720,2646,7.8886199999999995,51.52488833333334,17,9,2019,09:47:28.473416
2 | 2,2,2,7.888273333333332,51.524965,17,9,2019,09:47:36.670879
3 | 3,61,61,7.887879999999999,51.52500666666666,17,9,2019,09:47:41.617565
4 | 4,123,124,7.8875183333333325,51.525038333333335,17,9,2019,09:47:45.626982
5 | 5,184,185,7.887091666666667,51.52509166666667,17,9,2019,09:47:50.613457
6 | 6,258,259,7.886685,51.525173333333335,17,9,2019,09:47:55.620938
7 | 7,333,334,7.88627,51.525245,17,9,2019,09:48:00.685155
8 | 8,409,410,7.885825,51.525306666666665,17,9,2019,09:48:05.623222
9 | 9,484,485,7.880416666666667,51.51863,17,9,2019,09:52:50.434094
10 |
--------------------------------------------------------------------------------
/examples/foto_log/0917_010.txt:
--------------------------------------------------------------------------------
1 | 1,4471,4383,7.880433333333333,51.51862166666667,17,9,2019,09:52:58.308600
2 | 2,2,2,7.880766666666666,51.51872333333334,17,9,2019,09:53:19.628462
3 | 3,272,271,7.881128333333334,51.518775000000005,17,9,2019,09:53:22.633422
4 | 4,317,316,7.881521666666667,51.518815,17,9,2019,09:53:25.700486
5 | 5,363,362,7.881948333333334,51.518855,17,9,2019,09:53:28.692002
6 | 6,408,407,7.882393333333333,51.51891833333334,17,9,2019,09:53:31.632987
7 | 7,452,451,7.88285,51.518978333333344,17,9,2019,09:53:34.638026
8 | 8,498,497,7.883335,51.51902,17,9,2019,09:53:37.659350
9 | 9,542,541,7.883695,51.519058333333334,17,9,2019,09:53:39.656078
10 | 10,572,572,7.884056666666667,51.51909166666666,17,9,2019,09:53:41.656554
11 | 11,602,602,7.88442,51.51911833333334,17,9,2019,09:53:43.628595
12 | 12,632,632,7.884938333333334,51.51915,17,9,2019,09:53:46.664893
13 | 13,677,677,7.885416666666667,51.519178333333336,17,9,2019,09:53:49.670705
14 | 14,723,723,7.8858983333333335,51.51921166666666,17,9,2019,09:53:52.683134
15 | 15,767,767,7.886383333333333,51.519238333333334,17,9,2019,09:53:55.664459
16 | 16,812,812,7.8868383333333325,51.51925833333333,17,9,2019,09:53:58.668721
17 | 17,857,857,7.887270000000001,51.519281666666664,17,9,2019,09:54:01.688648
18 | 18,902,902,7.887653333333333,51.51929333333332,17,9,2019,09:54:04.676994
19 | 19,948,948,7.88803,51.51926333333334,17,9,2019,09:54:07.663353
20 | 20,990,977,7.888481666666666,51.519223333333336,17,9,2019,09:54:11.672959
21 |
--------------------------------------------------------------------------------
/examples/google earth.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soarwing52/RealsensePython/d4f5cf5c0be9ec7d84e9d759886144c8127d445d/examples/google earth.PNG
--------------------------------------------------------------------------------
/processor/data_process_tkinter.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from tkinter import *
3 | from tkinter import filedialog
4 | import numpy as np
5 | import cv2
6 | import multiprocessing as mp
7 | import os
8 | import piexif
9 | from fractions import Fraction
10 |
def to_deg(value, loc):
    """Convert a decimal coordinate into a (degrees, minutes, seconds,
    direction) tuple, e.g. (25, 13, 48.343, 'N').

    Keyword arguments: value is float gps-value, loc is direction list
    ["S", "N"] or ["W", "E"]; direction is "" when value == 0.

    Fix: the original bound the minutes to the name ``min``, shadowing
    the builtin; renamed.
    """
    if value < 0:
        direction = loc[0]
    elif value > 0:
        direction = loc[1]
    else:
        direction = ""
    abs_value = abs(value)
    degrees = int(abs_value)
    frac_minutes = (abs_value - degrees) * 60
    minutes = int(frac_minutes)
    seconds = round((frac_minutes - minutes) * 60, 5)
    return (degrees, minutes, seconds, direction)
28 |
29 |
def change_to_rational(number):
    """Express *number* as a (numerator, denominator) tuple.

    Going through str() makes e.g. 0.1 become 1/10 instead of the huge
    exact binary-float fraction.
    """
    ratio = Fraction(str(number))
    return (ratio.numerator, ratio.denominator)
37 |
38 |
def set_gps_location(file_name, lat, lng, altitude):
    """Write a GPS position into *file_name* as EXIF metadata.

    Keyword arguments:
    file_name -- image file
    lat -- latitude (as float)
    lng -- longitude (as float)
    altitude -- altitude (as float)
    """
    lat_dms = to_deg(lat, ["S", "N"])
    lng_dms = to_deg(lng, ["W", "E"])

    # EXIF wants each of deg/min/sec as a rational pair.
    exiv_lat = tuple(change_to_rational(part) for part in lat_dms[:3])
    exiv_lng = tuple(change_to_rational(part) for part in lng_dms[:3])

    gps_ifd = {
        piexif.GPSIFD.GPSVersionID: (2, 0, 0, 0),
        piexif.GPSIFD.GPSAltitudeRef: 1,
        piexif.GPSIFD.GPSAltitude: change_to_rational(round(altitude)),
        piexif.GPSIFD.GPSLatitudeRef: lat_dms[3],
        piexif.GPSIFD.GPSLatitude: exiv_lat,
        piexif.GPSIFD.GPSLongitudeRef: lng_dms[3],
        piexif.GPSIFD.GPSLongitude: exiv_lng,
    }

    piexif.insert(piexif.dump({"GPS": gps_ifd}), file_name)
66 |
67 |
def geotag():
    """Read shp/matcher.txt and write each row's lat/lon into the
    matching jpg's EXIF block.
    """
    global project_dir, t
    print('start geotag')
    matcher = project_dir + '/shp/matcher.txt'
    jpg_dir = project_dir + '/jpg'
    with open(matcher, 'r') as log:
        log.readline()  # skip the header row
        for row in log:
            fields = row.split(',')
            weg_num = fields[0]
            frame = fields[2]
            lat = float(fields[4])
            lon = float(fields[5])
            jpgname = '{}/{}-{}.jpg'.format(jpg_dir, weg_num, frame)
            try:
                set_gps_location(jpgname, lat, lon, 0)
            except IOError:
                print(jpgname + ' not found')
                continue
            print('tagged')
    t.insert(END, 'geotagging fertig\n')
92 |
93 |
def dir_generate(in_dir):
    """Ensure directory *in_dir* exists and return its path (as str).

    Fix: the original caught ``WindowsError``, a name that only exists
    on Windows -- on other platforms a makedirs failure raised NameError
    instead.  ``OSError`` is the portable parent class.
    """
    in_dir = str(in_dir)
    if not os.path.exists(in_dir):
        try:
            os.makedirs(in_dir, 0o700)
        except OSError:
            # Lost the race: another process created it first.
            print('already exist')
    return in_dir
105 |
106 |
def get_dir(var):
    """Tkinter button handler: ask for the project directory, store it
    in the global and in the StringVar shown next to the button."""
    global project_dir
    chosen = filedialog.askdirectory()
    project_dir = chosen
    var.set(chosen)
    print(chosen)
    return chosen
114 |
115 |
def pair(num, tet, ans):
    """Match one fotolog file against the extracted jpgs and append the
    matches to *tet* (the open matcher.txt handle).

    A log entry and a jpg are the same shot when the jpg's frame number
    is within +-5 of the logged color frame.  Fixes: the docstring
    claimed +-2 while the code used range(-5, 5), which also silently
    skipped the +5 offset (now range(-5, 6)); the loop variable no
    longer shadows the *ans* parameter.

    :param num: bag/log base name (e.g. "0917_006")
    :param tet: writable file object for matcher.txt
    :param ans: project directory path
    """
    project_dir = ans
    with open('{}/foto_log/{}.txt'.format(project_dir, num), 'r') as foto:
        records = [x.strip().split(',') for x in foto]

    global written_lonlat
    written_lonlat = [0, 0]
    for rec in records:
        ID, color_l, Depth = rec[0], rec[1], rec[2]
        Lon, Lat, Time = rec[3], rec[4], rec[8]

        for offset in range(-5, 6):  # inclusive +-5 frame window
            frame = int(color_l) + offset
            jpg = '{}/jpg/{}-{}.jpg'.format(project_dir, num, frame)
            # written_lonlat suppresses duplicate rows for the same fix.
            if os.path.isfile(jpg) and [Lat, Lon] != written_lonlat:
                info = '{},{},{},{},{},{},{},{}\n'.format(
                    num, ID, frame, Depth, Lat, Lon, Time, jpg)
                tet.write(info)
                written_lonlat = [Lat, Lon]
    print(num + ' done')
142 |
143 |
def pair_list(ans):
    """Loop over every log file in foto_log/ and build shp/matcher.txt.

    Fix: the completion message used to live in a ``finally`` inside the
    loop, so "pair list done" was printed once per log file (even for
    failed ones); it now prints once, after the loop.

    :param ans: project directory path
    """
    project_dir = ans
    foto_log = project_dir + '/foto_log'
    shp_dir = dir_generate(project_dir + '/shp')

    with open(shp_dir + '/matcher.txt', 'w') as txt:
        txt.write('weg_num,foto_id,Color,Depth,Lat,Lon,Uhrzeit,jpg_path\n')
        for log in os.listdir(foto_log):
            num = log.split('.')[0]
            try:
                pair(num, txt, ans)
            except IOError:
                print('no file {}'.format(num))
    print('pair list done')
160 |
def color_to_jpg(input_file):
    """Extract every color frame of a .bag recording as a sharpened jpg
    under <project>/jpg/<bagname>-<frame>.jpg.

    Playback ends when wait_for_frames() raises RuntimeError.  Fixes:
    the original called pipeline.stop() in ``finally`` even when
    start() had failed, raising a second RuntimeError out of the
    cleanup; the constant sharpening kernel is hoisted out of the loop.
    """
    import pyrealsense2 as rs
    print(input_file)
    bagname = os.path.basename(input_file)
    projectdir = os.path.dirname(os.path.dirname(input_file))

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_all_streams()
    config.enable_device_from_file(input_file, False)
    # Sharpening kernel (constant; applied to every extracted frame).
    kernel = np.array([[0, -1, 0],
                       [-1, 5, -1],
                       [0, -1, 0]])
    started = False
    try:
        profile = pipeline.start(config)
        started = True
        playback = profile.get_device().as_playback()
        playback.set_real_time(False)  # read as fast as possible, no frame drops
        while True:
            frames = pipeline.wait_for_frames()
            color_frame = frames.get_color_frame()
            frame_no = rs.frame.get_frame_number(color_frame)
            jpg_name = '{}/jpg/{}-{}.jpg'.format(projectdir, bagname[:-4], frame_no)
            if os.path.isfile(jpg_name):
                continue  # already extracted on an earlier pass
            print('compressing {}'.format(jpg_name))
            color_image = np.asanyarray(color_frame.get_data())
            color_cvt = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
            sharpened = cv2.filter2D(color_cvt, -1, kernel)
            cv2.imwrite(jpg_name, sharpened, [cv2.IMWRITE_JPEG_QUALITY, 100])
    except RuntimeError:
        # Raised at end of playback (or for an unreadable bag) -- expected.
        pass
    finally:
        print('frame covert finished for ' + input_file)
        if started:
            pipeline.stop()
202 |
203 |
204 |
def generate_jpg():
    """Tkinter button handler: convert every bag in <project>/bag to
    jpgs, making two passes so bags that were unreadable on the first
    pass are retried.

    Fix: the worker Pool was never closed or joined, leaking worker
    processes on every click; ``with`` now handles teardown.
    """
    global project_dir, t
    dir_generate(project_dir + '/jpg')
    bag_dir = dir_generate(project_dir + '/bag')

    try:
        for _ in range(2):
            bags = [os.path.join(bag_dir, bag) for bag in os.listdir(bag_dir)]
            with mp.Pool() as pool:
                pool.map(color_to_jpg, bags)
    finally:
        print('jpg fertig')
        t.insert(END, 'JPG fertig\n')
221 |
222 |
def from_bag_to_list(ans):
    """Button handler: build shp/matcher.txt for project *ans*.

    Fix: the original also called ``matchershp()``, which is not defined
    anywhere in this module and raised NameError right after the list
    was written; the call was removed.
    """
    pair_list(ans)
227 |
228 |
def main():
    """Build and run the Tkinter data-process window."""
    global t
    root = Tk()
    root.title('shapefile generator')
    root.geometry('500x300')

    # Top row: folder-picker button and a label echoing the chosen path.
    top = Frame(root, bd=5)
    top.pack()
    var = StringVar()
    folder_label = Label(top, textvariable=var, bg='white', bd=5, width=40)
    choose = lambda: get_dir(var)
    picker = Button(top, text='select folder', height=2, command=choose)
    picker.pack()
    folder_label.pack()

    # Middle: action buttons; bottom: text log written to by the handlers.
    buttons = Frame(root, height=20, bd=5)
    buttons.pack()
    log_frame = Frame(root)
    log_frame.pack()
    t = Text(log_frame, width=40, height=10)
    t.pack()

    Button(buttons, text='generate shp', command=lambda: from_bag_to_list(project_dir)).grid(row=1, column=2)
    Button(buttons, text='generate jpg', command=lambda: generate_jpg()).grid(row=1, column=1)
    Button(buttons, text='geotag', command=geotag).grid(row=1, column=4)

    root.mainloop()
255 |
256 |
# Launch the GUI only when run as a script (not on import).
if __name__ == '__main__':
    main()
259 |
--------------------------------------------------------------------------------
/processor/gui2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'new.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.13.1
6 | #
7 | # WARNING! All changes made in this file will be lost!
8 |
9 |
10 | from PyQt5 import QtCore, QtGui, QtWidgets
11 |
12 |
class Ui_MainWindow(object):
    """Auto-generated (pyuic5, from 'new.ui') layout for the processor
    window.

    NOTE(review): generated code -- regenerate from the .ui file rather
    than hand-editing (the file header warns changes will be lost).
    """

    def setupUi(self, MainWindow):
        """Create all widgets of the JPG-generator window and lay them out."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(608, 428)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # Project-folder chooser button + label echoing the chosen path.
        self.dir_button = QtWidgets.QPushButton(self.centralwidget)
        self.dir_button.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.dir_button.setToolTipDuration(2)
        self.dir_button.setObjectName("dir_button")
        self.verticalLayout_2.addWidget(self.dir_button)
        self.project_lable = QtWidgets.QLabel(self.centralwidget)
        self.project_lable.setToolTipDuration(2)
        self.project_lable.setAutoFillBackground(False)
        self.project_lable.setStyleSheet("QLabel {background-color:rgb(255, 255, 255);color:black;}")
        self.project_lable.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.project_lable.setFrameShadow(QtWidgets.QFrame.Plain)
        self.project_lable.setScaledContents(False)
        self.project_lable.setAlignment(QtCore.Qt.AlignCenter)
        self.project_lable.setOpenExternalLinks(False)
        self.project_lable.setProperty("folder_name", "")
        self.project_lable.setObjectName("project_lable")
        self.verticalLayout_2.addWidget(self.project_lable)
        # The three action buttons, stacked vertically.
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.make_jpg = QtWidgets.QPushButton(self.centralwidget)
        self.make_jpg.setObjectName("make_jpg")
        self.verticalLayout.addWidget(self.make_jpg)
        self.make_list = QtWidgets.QPushButton(self.centralwidget)
        self.make_list.setObjectName("make_list")
        self.verticalLayout.addWidget(self.make_list)
        self.geotag = QtWidgets.QPushButton(self.centralwidget)
        self.geotag.setObjectName("geotag")
        self.verticalLayout.addWidget(self.geotag)
        self.verticalLayout_2.addLayout(self.verticalLayout)
        # Log pane and progress bar at the bottom.
        self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)
        self.textBrowser.setObjectName("textBrowser")
        self.verticalLayout_2.addWidget(self.textBrowser)
        self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.progressBar.sizePolicy().hasHeightForWidth())
        self.progressBar.setSizePolicy(sizePolicy)
        self.progressBar.setProperty("value", 0)
        self.progressBar.setObjectName("progressBar")
        self.verticalLayout_2.addWidget(self.progressBar)
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (kept separate for Qt translation)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "JPG Generator"))
        self.dir_button.setText(_translate("MainWindow", "Select Project"))
        self.project_lable.setText(_translate("MainWindow", "Projekt Ordner"))
        self.make_jpg.setText(_translate("MainWindow", "Generate JPG"))
        self.make_list.setText(_translate("MainWindow", "Match List"))
        self.geotag.setText(_translate("MainWindow", "Geotag"))
--------------------------------------------------------------------------------
/processor/proccessor.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys, time
3 | from PyQt5 import QtWidgets, QtCore
4 | from PyQt5.QtCore import QTimer, QThread, pyqtSignal
5 | from PyQt5.QtWidgets import QFileDialog
6 | from gui2 import Ui_MainWindow
7 |
8 | import sys
9 | import numpy as np
10 | import cv2
11 | import multiprocessing as mp
12 | import threading
13 | import os
14 | import piexif
15 | from fractions import Fraction
16 | from queue import Queue
17 |
18 |
19 | RUNNING = False
20 |
21 |
def set_gps_location(file_name, lat, lng, altitude):
    """Embed a GPS position into *file_name* as EXIF metadata.

    Keyword arguments:
    file_name -- image file
    lat -- latitude (as float)
    lng -- longitude (as float)
    altitude -- altitude (as float)
    """
    lat_dms = to_deg(lat, ["S", "N"])
    lng_dms = to_deg(lng, ["W", "E"])

    # EXIF stores deg/min/sec each as a rational (numerator, denominator).
    lat_rational = tuple(change_to_rational(c) for c in lat_dms[:3])
    lng_rational = tuple(change_to_rational(c) for c in lng_dms[:3])

    gps_ifd = {
        piexif.GPSIFD.GPSVersionID: (2, 0, 0, 0),
        piexif.GPSIFD.GPSAltitudeRef: 1,
        piexif.GPSIFD.GPSAltitude: change_to_rational(round(altitude)),
        piexif.GPSIFD.GPSLatitudeRef: lat_dms[3],
        piexif.GPSIFD.GPSLatitude: lat_rational,
        piexif.GPSIFD.GPSLongitudeRef: lng_dms[3],
        piexif.GPSIFD.GPSLongitude: lng_rational,
    }

    exif_bytes = piexif.dump({"GPS": gps_ifd})
    piexif.insert(exif_bytes, file_name)
49 |
50 |
def to_deg(value, loc):
    """Convert a decimal coordinate into a (degrees, minutes, seconds,
    direction) tuple, e.g. (25, 13, 48.343, 'N').

    Keyword arguments: value is float gps-value, loc is direction list
    ["S", "N"] or ["W", "E"]; direction is "" when value == 0.

    Fix: the original bound the minutes to the name ``min``, shadowing
    the builtin; renamed.
    """
    if value < 0:
        direction = loc[0]
    elif value > 0:
        direction = loc[1]
    else:
        direction = ""
    abs_value = abs(value)
    degrees = int(abs_value)
    frac_minutes = (abs_value - degrees) * 60
    minutes = int(frac_minutes)
    seconds = round((frac_minutes - minutes) * 60, 5)
    return degrees, minutes, seconds, direction
68 |
69 |
def change_to_rational(number):
    """Express *number* as a (numerator, denominator) tuple.

    str() round-trip keeps e.g. 0.1 as 1/10 rather than the exact
    binary-float fraction.
    """
    ratio = Fraction(str(number))
    return ratio.numerator, ratio.denominator
77 |
78 |
def generate_jpg(project_dir, q):
    """Worker-thread entry: convert every bag under <project>/bag to
    jpgs, reporting percentage progress through queue *q*.

    Two passes are made so bags that failed on the first pass are
    retried.  Fixes: the Pool was never closed/joined (process leak on
    every run) and an empty bag folder caused ZeroDivisionError in the
    progress arithmetic.
    """
    bag_dir = project_dir + '/bag'
    global RUNNING
    RUNNING = True
    try:
        for _ in range(2):
            bags = [os.path.join(bag_dir, bag) for bag in os.listdir(bag_dir)]
            if not bags:
                q.put(100)
                break
            with mp.Pool() as pool:
                done = 0
                for _result in pool.imap_unordered(color_to_jpg, bags):
                    done += 1
                    q.put(done * 100 / len(bags))
    finally:
        print('jpg fertig')
        RUNNING = False
100 |
101 |
def color_to_jpg(input_file):
    """Extract every color frame of a .bag recording as a sharpened jpg
    under <project>/jpg/<bagname>-<frame>.jpg.

    Playback ends when wait_for_frames() raises RuntimeError.  Fixes:
    the original called pipeline.stop() in ``finally`` even when
    start() had failed, raising a second RuntimeError out of the
    cleanup; the constant sharpening kernel is hoisted out of the loop.
    """
    import pyrealsense2 as rs
    bagname = os.path.basename(input_file)
    projectdir = os.path.dirname(os.path.dirname(input_file))

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_all_streams()
    config.enable_device_from_file(input_file, False)
    # Sharpening kernel (constant; applied to every extracted frame).
    kernel = np.array([[0, -1, 0],
                       [-1, 5, -1],
                       [0, -1, 0]])
    started = False
    try:
        profile = pipeline.start(config)
        started = True
        playback = profile.get_device().as_playback()
        playback.set_real_time(False)  # read as fast as possible, no frame drops
        while True:
            frames = pipeline.wait_for_frames()
            color_frame = frames.get_color_frame()
            frame_no = rs.frame.get_frame_number(color_frame)
            jpg_name = '{}/jpg/{}-{}.jpg'.format(projectdir, bagname[:-4], frame_no)
            if os.path.isfile(jpg_name):
                continue  # already extracted on an earlier pass
            color_image = np.asanyarray(color_frame.get_data())
            color_cvt = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
            sharpened = cv2.filter2D(color_cvt, -1, kernel)
            cv2.imwrite(jpg_name, sharpened, [cv2.IMWRITE_JPEG_QUALITY, 100])
    except RuntimeError:
        # Raised at end of playback (or for an unreadable bag) -- expected.
        pass
    finally:
        print('frame covert finished for ' + input_file)
        if started:
            pipeline.stop()
143 |
144 |
145 |
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """Main processing GUI: pick a project folder, extract JPGs from bag
    files, pair them with the GPS foto_log entries, and geotag the JPGs.

    Relies on a module-level RUNNING flag (shared with the worker threads)
    to prevent two long-running jobs from overlapping.
    """

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent=parent)
        self.setupUi(self)
        # wire the four workflow buttons from the Qt Designer UI
        self.dir_button.clicked.connect(lambda: self.get_prjdir_dialog())
        self.make_jpg.clicked.connect(lambda: self.generate_jpg_button())
        self.make_list.clicked.connect(lambda: self.pair_list())
        self.geotag.clicked.connect(lambda: self.geotag_thread())

        # reusable "job already running" warning dialog
        self.msg = QtWidgets.QMessageBox()
        self.msg.setWindowTitle('Warning')
        self.msg.setIcon(QtWidgets.QMessageBox.Information)

    def get_prjdir_dialog(self):
        """Ask the user for the project directory and show it in the label."""
        self.prj_dir = QFileDialog.getExistingDirectory()
        self.project_lable.setText(self.prj_dir)
        self.project_lable.adjustSize()

    def dir_generate(self, in_dir):
        """Examine if this folder exists; if not, create one (mode 0o700).

        Returns the directory path as a string.
        """
        in_dir = str(in_dir)
        if not os.path.exists(in_dir):
            try:
                os.makedirs(in_dir, 0o700)
            except WindowsError:
                # NOTE(review): WindowsError is Windows-only; on other
                # platforms this except clause would itself NameError.
                print('already exist')
            finally:
                pass
        return in_dir

    def generate_jpg_button(self):
        """Start the JPG-extraction worker thread (one job at a time)."""
        self.dir_generate(self.prj_dir+'/jpg')
        if not RUNNING:
            self.t_jpg = JPGThread()
            self.t_jpg.set_dir(self.prj_dir)
            self.t_jpg.start()
            self.t_jpg.update_progressbar.connect(self.update_progressbar)
            self.t_jpg.msg.connect(self.message_reciever)
        else:
            self.msg.setText('still creating JPG')
            self.msg.exec_()

    def pair(self, num):
        """Match fotolog entries of bag *num* with existing JPGs, with the
        accuracy of +-5 frames, appending matches to shp/matcher.txt.

        NOTE(review): range(-5, 5) actually tries offsets -5..4, not the
        symmetric +-5 the docstring claims -- confirm intent.
        """
        project_dir = self.prj_dir
        with open('{}/foto_log/{}.txt'.format(project_dir, num), 'r') as foto:
            foto = [x.strip().split(',') for x in foto]

        # last written coordinate, used to suppress duplicate locations
        self.written_lonlat = [0, 0]
        for lines_l in foto:
            # log columns: id, color frame, depth frame, lon, lat, then the
            # date field which itself contains commas (day,month,year,time),
            # which is why the time ends up at index 8
            ID = lines_l[0]
            color_l = lines_l[1]
            Depth = lines_l[2]
            Lon = lines_l[3]
            Lat = lines_l[4]
            Time = lines_l[8]

            for i in range(-5, 5):
                ans = int(color_l) + i
                if os.path.isfile('{}/jpg/{}-{}.jpg'.format(project_dir, num, ans)) and [Lat, Lon] != self.written_lonlat:
                    jpg = '{}/jpg/{}-{}.jpg'.format(project_dir, num, ans)
                    info = '{},{},{},{},{},{},{},{}\n'.format(num, ID, ans, Depth, Lat, Lon, Time, jpg)
                    with open(project_dir + '/shp/matcher.txt', 'a') as txt:
                        txt.write(info)
                    self.written_lonlat = [Lat, Lon]
        #print(num + ' done')

    def pair_list(self):
        """Button handler: loop through files in foto_log and create the
        paired matcher.txt in the shp folder (header written first, then
        pair() appends one line per matched photo)."""
        global RUNNING
        project_dir = self.prj_dir
        foto_log = project_dir + '/foto_log'
        shp_dir = self.dir_generate(project_dir + '/shp')
        RUNNING = True
        with open(shp_dir + '/matcher.txt', 'w') as txt:
            txt.write('weg_num,foto_id,Color,Depth,Lat,Lon,Uhrzeit,jpg_path\n')
        source = os.listdir(foto_log)
        total = len(source)
        for i, log in enumerate(source):
            num = log.split('.')[0]
            try:
                self.pair(num)
                self.update_progressbar(i*100/total)
                self.statusbar.showMessage(num)
            except IOError:
                # best-effort: a missing/unreadable log is reported, not fatal
                print('no file {}'.format(num))
            finally:
                pass
        print('pair list done')
        RUNNING = False
        self.update_progressbar(100)
        self.textBrowser.append('List fertig')

    def geotag_thread(self):
        """Start the geotagging worker thread (one job at a time)."""
        if not RUNNING:
            self.t_geotag = GeotagThread()
            self.t_geotag.prj(self.prj_dir)
            self.t_geotag.start()
            self.t_geotag.update_progressbar.connect(self.update_progressbar)
            self.t_geotag.msg.connect(self.message_reciever)
        else:
            self.msg.setText('still geotagging')
            self.msg.exec_()

    def update_progressbar(self, val):
        """Slot: set the progress bar to *val* percent."""
        self.progressBar.setValue(val)

    def message_reciever(self, msg):
        """Slot: log errors / completion notices; echo everything to the
        status bar."""
        if 'Error' in msg or 'fertig' in msg:
            self.textBrowser.append(msg)
        self.statusbar.showMessage(msg)
257 |
class JPGThread(QThread):
    """Background worker that extracts JPG files from the project's bags.

    Runs generate_jpg in a plain thread and relays the progress values it
    puts on a Queue to the GUI via Qt signals.
    """
    update_progressbar = pyqtSignal(float)  # progress percentage 0-100
    msg = pyqtSignal(str)                   # status text for the GUI

    def __init__(self, parent=None):
        # BUG FIX: was misspelled "__int__", so this initializer never ran
        # (QThread's default __init__ was silently used instead).
        super(JPGThread, self).__init__(parent)

    def run(self):
        """Start generate_jpg and forward its progress until RUNNING clears.

        generate_jpg is expected to set/clear the module-level RUNNING flag
        and to push progress floats onto the queue.
        NOTE(review): q.get() blocks; if RUNNING stays set while the queue
        is empty this thread hangs -- confirm generate_jpg's contract.
        """
        q = Queue()
        t1 = threading.Thread(target=generate_jpg, args=(self.prj_dir, q))
        t1.start()
        while RUNNING:
            x = q.get()
            self.update_progressbar.emit(x)
        self.update_progressbar.emit(100)
        self.msg.emit('JPG fertig')

    def set_dir(self, stt):
        """Remember the project directory; call before start()."""
        self.prj_dir = stt
276 |
class GeotagThread(QThread):
    """Background worker that writes GPS EXIF tags into the JPGs listed in
    <project>/shp/matcher.txt."""
    update_progressbar = pyqtSignal(float)  # progress percentage 0-100
    msg = pyqtSignal(str)                   # status/error text for the GUI

    def __init__(self, parent=None):
        # BUG FIX: was misspelled "__int__", so this initializer never ran.
        super(GeotagThread, self).__init__(parent)

    def run(self):
        """Read matcher.txt and geotag each referenced JPG, emitting
        progress; sets the module-level RUNNING flag for the duration."""
        global RUNNING
        RUNNING = True
        project_dir = self.prj_dir
        print('start geotag')
        fotolog = project_dir + '/shp/matcher.txt'
        jpg_dir = project_dir + '/jpg'
        total = len(os.listdir(jpg_dir))
        current = 0

        with open(fotolog, 'r') as log:
            log.readline()  # skip the CSV header line
            for y in log:
                data = y.split(',')
                # columns: weg_num, foto_id, Color, Depth, Lat, Lon, ...
                weg_num, frame, Lat, Lon = data[0], data[2], float(data[4]), float(data[5])
                jpgname = '{}/{}-{}.jpg'.format(jpg_dir, weg_num, frame)
                self.msg.emit(jpgname)
                try:
                    set_gps_location(jpgname, Lat, Lon, 0)
                    current += 1
                    self.update_progressbar.emit(current*100/total)
                except Exception:
                    # BUG FIX: was a bare "except:"; keep the best-effort
                    # behaviour but stop swallowing SystemExit /
                    # KeyboardInterrupt as well.
                    print(jpgname + ' not found')
                    self.msg.emit('Error: {} broken image'.format(jpgname))
        print('tagged')
        self.update_progressbar.emit(100)
        self.msg.emit('Geotag fertig')
        RUNNING = False

    def prj(self, stt):
        """Remember the project directory; call before start()."""
        self.prj_dir = stt
316 |
317 |
318 |
if __name__ == "__main__":
    # freeze_support() is required for multiprocessing when the script is
    # packaged as a frozen Windows executable
    mp.freeze_support()
    app = QtWidgets.QApplication(sys.argv)
    w = MainWindow()
    w.show()
    sys.exit(app.exec_())
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | Fraction==1.1.0
2 | numpy==1.17.4
3 | opencv-python==4.1.1.26
4 | piexif==1.1.3
5 | PyQt5==5.13.2
6 | PyQt5-sip==12.7.0
7 | pyrealsense2==2.30.0.1184
8 | pyserial==3.4
9 |
--------------------------------------------------------------------------------
/separate functions/1.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soarwing52/RealsensePython/d4f5cf5c0be9ec7d84e9d759886144c8127d445d/separate functions/1.PNG
--------------------------------------------------------------------------------
/separate functions/background_remove.py:
--------------------------------------------------------------------------------
"""Play back a recorded bag and preview color frames with the far
background removed (based on the librealsense align example)."""
# First import library
import pyrealsense2 as rs
# Import Numpy for easy array manipulation
import numpy as np
# Import OpenCV for easy image rendering
import cv2


bag = '0627_019.bag'
pipeline = rs.pipeline()

# Create a config object
config = rs.config()
# Tell config that we will use a recorded device from file, to be used by the pipeline through playback.
config.enable_device_from_file(bag)
# Configure the pipeline to stream the depth stream
config.enable_all_streams()

profile = pipeline.start(config)

# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is: ", depth_scale)

# We will be removing the background of objects more than
# clipping_distance_in_meters meters away
clipping_distance_in_meters = 10  # 1 meter
clipping_distance = clipping_distance_in_meters / depth_scale

# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)

try:
    while True:
        # Get frameset of color and depth
        frames = pipeline.wait_for_frames()

        # Align the depth frame to color frame
        aligned_frames = align.process(frames)

        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            continue

        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        # Remove background - Set pixels further than clipping_distance
        # (or with no depth reading at all) to grey.
        grey_color = 153
        depth_image_3d = np.dstack((depth_image, depth_image, depth_image))  # depth image is 1 channel, color is 3 channels
        # BUG FIX: clipping_distance was computed but never used -- the mask
        # only greyed out invalid (depth == 0) pixels, so no background was
        # actually clipped.
        bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)

        # Render images (the side-by-side stack is kept for easy toggling)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        images = np.hstack((bg_removed, depth_colormap))
        cv2.namedWindow('Align Example', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('Align Example', bg_removed)
        key = cv2.waitKey(1000)
        # Press esc or 'q' to close the image window
        if key & 0xFF == ord('q') or key == 27:
            cv2.destroyAllWindows()
            break
finally:
    pipeline.stop()
--------------------------------------------------------------------------------
/separate functions/check_bag_valid.py:
--------------------------------------------------------------------------------
1 | import os
2 | # -*- coding: utf-8 -*-
3 | from Tkinter import *
4 | import tkFileDialog
5 | import os
6 | import arcpy
7 | from arcpy import management
8 | import pyrealsense2 as rs
9 | import numpy as np
10 | import cv2
11 | import multiprocessing as mp
12 | import time
13 |
14 | def test_bag(input_file):
15 | pipeline = rs.pipeline()
16 | config = rs.config()
17 | config.enable_device_from_file(input_file, False)
18 | config.enable_all_streams()
19 | profile = pipeline.start(config)
20 | device = profile.get_device()
21 | playback = device.as_playback()
22 | playback.set_real_time(False)
23 | try:
24 | while True:
25 | frames = pipeline.wait_for_frames()
26 | print 'Xx'
27 | depth_frame = frames.get_depth_frame()
28 | color_frame = frames.get_color_frame()
29 | if not depth_frame or not color_frame:
30 | continue
31 | depth_color_frame = rs.colorizer().colorize(depth_frame)
32 | depth_image = np.asanyarray(depth_color_frame.get_data())
33 | depth_colormap_resize = cv2.resize(depth_image,(424,240))
34 | color_image = np.asanyarray(color_frame.get_data())
35 | color_cvt = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
36 | color_cvt_2 = cv2.resize(color_cvt, (320,240))
37 | images = np.hstack((color_cvt_2, depth_colormap_resize))
38 | cv2.namedWindow('Color', cv2.WINDOW_AUTOSIZE)
39 | cv2.imshow('Color', images)
40 | time.sleep(1)
41 | pipeline.stop()
42 | except RuntimeError:
43 | print '{} unindexed'.format(input_file)
44 | finally:
45 | print 'tested'
46 | pass
47 |
if __name__ == '__main__':
    # smoke-test a single recorded bag file (hard-coded path)
    test_bag(r'C:\Users\cyh\Desktop\test\bag\0529_002.bag')
--------------------------------------------------------------------------------
/separate functions/cvcanvas.py:
--------------------------------------------------------------------------------
1 | import cv2
2 |
3 | img_file = r'C:\Users\cyh\Documents\GitHub\RealsensePython\examples/google earth.PNG'
4 | img = cv2.imread(img_file)
5 |
6 |
# Adding Function Attached To Mouse Callback
def draw(event, x, y, flags, params):
    """Mouse callback: rubber-band a white line from the press point to the
    current cursor position while the left button is held.

    event/flags are raw OpenCV callback codes (1 = LBUTTONDOWN,
    4 = LBUTTONUP; flags bit 1 = left button held).
    """
    global ix, iy, drawing
    print event, x, y, flags, params
    # Reload a fresh copy on every callback so the previous preview line is
    # discarded and only the current drag is visible (rubber-band effect).
    # NOTE(review): this relative path differs from the absolute img_file
    # used at module level -- confirm the working directory.
    img_file = 'examples/google earth.PNG'
    img = cv2.imread(img_file)

    # Left Mouse Button Down Pressed: remember the anchor point
    if (event == 1):

        drawing = True
        ix = x
        iy = y

    if (flags == 1):
        if (drawing == True):
            # For Drawing Line
            cv2.line(img, pt1=(ix, iy), pt2=(x, y), color=(255, 255, 255), thickness=3)
            cv2.imshow("Window", img)
            #ix = x
            #iy = y
            # For Drawing Rectangle
            # cv2.rectangle(image,pt1=(ix,iy),pt2=(x,y),color=(255,255,255),thickness=3)

    if (event == 4):
        # left button released: stop drawing
        #cv2.line(img, pt1=(ix, iy), pt2=(x, y), color=(255, 255, 255), thickness=3)
        drawing = False
        #cv2.imshow("Window", img)
35 |
36 |
37 |
# Making Window For The Image
cv2.namedWindow("Window")

# Adding Mouse CallBack Event
cv2.setMouseCallback("Window", draw)
cv2.imshow("Window", img)
# Starting The Loop So Image Can Be Shown
while (True):
    key = cv2.waitKey(1000)
    if key & 0xFF == ord('q'):
        # 'q' quits the viewer
        break
    elif key == 32:
        # spacebar: reload the clean image, wiping any drawn lines
        img = cv2.imread(img_file)
        cv2.imshow("Window", img)

cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/separate functions/datacapture_mp_kml.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | import numpy as np
3 | import cv2
4 | import serial
5 | import datetime
6 | import time
7 | import os
8 | from math import sin, cos, sqrt, atan2, radians
9 | from getch import pause_exit
10 | from multiprocessing import Process,Value, Array, Pipe
11 |
12 |
def dir_generate(dir_name):
    """Create directory *dir_name* (mode 0o700) if it does not already exist.

    Safe against a concurrent-create race: an "already exists" error is
    ignored, any other OSError is re-raised. Returns None.
    """
    dir_name = str(dir_name)
    if not os.path.exists(dir_name):
        try:
            os.makedirs(dir_name, 0o700)
        except OSError:
            # BUG FIX: the original compared against errno.EEXIST without
            # ever importing errno, so this path raised NameError instead.
            if not os.path.isdir(dir_name):
                raise
21 |
22 |
def port_check():
    """Probe COM1..COM9 and return the last port that can be opened.

    A port counts as available if it can be opened and closed without a
    SerialException. If none is available (e.g. another program holds the
    GPS device), prompt, wait for a keypress, and hard-exit the process.
    """
    serialPort = serial.Serial()
    serialPort.baudrate = 4800
    serialPort.bytesize = serial.EIGHTBITS
    serialPort.parity = serial.PARITY_NONE
    serialPort.timeout = 2
    exist_port = None
    for x in range(1, 10):
        portnum = 'COM{}'.format(x)
        serialPort.port = 'COM{}'.format(x)
        try:
            # open/close succeeds only if the port exists and is free
            serialPort.open()
            serialPort.close()
            exist_port = portnum
        except serial.SerialException:
            pass
        finally:
            pass
    if exist_port:
        # note: if several ports open successfully, the HIGHEST-numbered
        # one wins, since exist_port is overwritten each iteration
        return exist_port
    else:
        print ('close other programs using gps')
        pause_exit(status=0, message='Press any key to leave')
        os._exit(0)
47 |
48 |
49 | def emptykml():
50 | kml = os.path.exists('./kml/Foto.kml')
51 | if kml:
52 | return
53 | else:
54 | with open('./kml/Foto.kml' , 'w') as kml:
55 | text = """
56 |
57 |
58 | Foto.kml
59 | 1
60 |
71 |
82 |
83 |
84 | normal
85 | #s_ylw-pushpin
86 |
87 |
88 | highlight
89 | #s_ylw-pushpin_hl
90 |
91 |
92 |
93 |
94 | """
95 | kml.write(text)
96 |
97 |
def write_kml(lon, lat):
    """
    To append a point in kml
    :param lon: longitude in decimal degrees
    :param lat: latitude in decimal degrees
    :return: None (rewrites ./kml/Foto.kml in place)
    """
    # read the whole document so a placemark can be spliced in
    myfile = open('./kml/Foto.kml', 'r')
    doc = myfile.readlines()
    myfile.close()

    # NOTE(review): index 37 is assumed to land inside the Document element
    # of the template written by emptykml() -- verify if that template ever
    # changes, since this is a hard-coded line position.
    doc.insert(37, """
P
#m_ylw-pushpin

{},{},0.0

""".format(lon, lat))

    f = open('./kml/Foto.kml', 'w')
    contents = "".join(doc)
    f.write(contents)
    f.close()
121 |
122 |
def gps_dis(location_1, location_2):
    '''
    this is the calculation of the distance between two long/lat locations
    input tuple/list
    '''
    # haversine formula on a sphere of radius 6373 km; result in metres
    earth_radius_km = 6373.0

    lon1, lat1 = radians(location_1[0]), radians(location_1[1])
    lon2, lat2 = radians(location_2[0]), radians(location_2[1])

    dlat = lat2 - lat1
    dlon = lon2 - lon1

    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    c = 2 * atan2(sqrt(a), sqrt(1 - a))

    # km -> m
    return earth_radius_km * c * 1000
145 |
146 |
def min2decimal(in_data):
    '''transforming the data of long lat '''
    # NMEA reports ddmm.mmmm: hundreds place holds degrees, the rest minutes
    value = float(in_data)
    whole_degrees = int(value / 100)
    minutes = value - whole_degrees * 100
    return whole_degrees + (minutes / 60)
154 |
155 |
def GPS(Location, gps_on):
    '''
    The main GPS reader loop (runs as a separate process).

    Parses $GPRMC / $GPGGA NMEA sentences from the serial port, publishes
    the current (lon, lat) into the shared Location array, and rewrites
    ./kml/live.kml with the live position. Clears on gps_on == False.
    '''
    print('GPS start')
    serialPort = serial.Serial()
    serialPort.port = port_check()
    serialPort.baudrate = 4800
    serialPort.bytesize = serial.EIGHTBITS
    serialPort.parity = serial.PARITY_NONE
    serialPort.timeout = 2
    serialPort.open()
    print ('GPS opened successfully')
    gps_on.value = True
    lon, lat = 0, 0
    try:
        while True:
            line = serialPort.readline()
            # NMEA lines arrive as bytes; split on b',' then decode fields
            data = line.split(b',')
            data = [x.decode("UTF-8") for x in data]

            if data[0] == '$GPRMC':
                if data[2] == "A":
                    # "A" = valid fix; fields 3/5 are lat/lon in ddmm.mmmm
                    lat = min2decimal(data[3])
                    lon = min2decimal(data[5])

                else:
                    #print 'searching gps'
                    pass

            elif data[0] == '$GPGGA':
                if data[6] == '1':
                    # fix quality 1 = GPS fix available
                    lon = min2decimal(data[4])
                    lat = min2decimal(data[2])
                else:
                    #print 'searching gps'
                    pass

            if lon == 0 or lat == 0:
                print ('gps signal not ready')

            else:
                #print ('gps ready, current location:{},{}'.format(lon,lat))
                # publish the position to the other processes
                Location[:] = [lon, lat]

                # rewrite the live-position kml consumed by Google Earth
                # NOTE(review): this template literal appears truncated in
                # this copy of the source -- verify against the original.
                with open('./kml/live.kml', 'w') as pos:
                    googleearth_message = '''

Live Point
hi im here

{},{},0

'''.format(lon, lat)
                    pos.write(googleearth_message)

            if gps_on.value is False:
                break
    except serial.SerialException:
        print ('Error opening GPS')
        gps_on.value = False
    finally:
        serialPort.close()
        print('GPS finish')
222 |
223 |
def Camera(child_conn, pic, Frame_num, camera_on, bag):
    """Camera process: stream RealSense frames, forward preview images over
    the pipe, and record single framesets to ./bag/<bag>.bag on demand.

    pic (shared Value) triggers a one-shot record; Frame_num receives the
    color/depth frame numbers of the captured shot; camera_on == 0 stops.
    """
    print('camera start')
    bag_name = './bag/{}.bag'.format(bag)
    camera_on.value = True
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1920, 1080, rs.format.rgb8, 30)
    config.enable_record_to_file(bag_name)
    profile = pipeline.start(config)

    device = profile.get_device()  # get record device
    recorder = device.as_recorder()
    recorder.pause()  # and pause it -- nothing is written until resume()

    # get sensor and set to high density (visual preset 4)
    depth_sensor = device.first_depth_sensor()
    depth_sensor.set_option(rs.option.visual_preset, 4)
    # dev_range = depth_sensor.get_option_range(rs.option.visual_preset)
    preset_name = depth_sensor.get_option_value_description(rs.option.visual_preset, 4)
    print (preset_name)
    # set frame queue size to max
    sensor = profile.get_device().query_sensors()
    for x in sensor:
        x.set_option(rs.option.frames_queue_size, 32)
    # set auto exposure but process data first
    color_sensor = profile.get_device().query_sensors()[1]
    color_sensor.set_option(rs.option.auto_exposure_priority, True)

    try:
        while True:
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            depth_color_frame = rs.colorizer().colorize(depth_frame)
            depth_image = np.asanyarray(depth_color_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            # ship the preview pair to the Show_Image process
            child_conn.send((color_image, depth_image))

            take_pic = pic.value
            if take_pic == 1:
                # one-shot capture: briefly resume the recorder so exactly
                # this frameset lands in the bag file
                recorder.resume()
                frames = pipeline.wait_for_frames()
                depth_frame = frames.get_depth_frame()
                color_frame = frames.get_color_frame()
                var = rs.frame.get_frame_number(color_frame)
                vard = rs.frame.get_frame_number(depth_frame)
                Frame_num[:] = [var, vard]
                time.sleep(0.05)
                #print ('photo taken')
                recorder.pause()

                pic.value = False
                continue

            elif camera_on.value == 0:
                child_conn.close()
                break

    except RuntimeError:
        print ('run')

    finally:
        print('pipeline stop')
        pipeline.stop()
289 |
290 |
def Show_Image(bag, parent_conn, take_pic, Frame_num, camera_on, camera_repeat, gps_on, Location):
    """Preview/consumer process: show the camera stream, decide when a
    photo should be taken (every ~15 m of GPS travel or on keypress), and
    append one line per photo to ./foto_log/<bag>.txt.

    Keys: 'b'/space = manual photo, 'q'/Esc = quit, 'r' = restart camera,
    'p' = toggle pause.
    """
    Pause = False
    i = 1                       # running photo counter
    foto_location = (0, 0)      # location of the last photo (for distance)
    foto_frame = Frame_num[0]   # frame number of the last photo (dedupe)
    logfile = open('./foto_log/{}.txt'.format(bag), 'w')
    try:
        while True:
            (lon, lat) = Location[:]
            current_location = (lon, lat)
            present = datetime.datetime.now()
            date = '{},{},{},{}'.format(present.day, present.month, present.year, present.time())

            # receive the preview pair sent by the Camera process
            color_image, depth_image = parent_conn.recv()
            depth_colormap_resize = cv2.resize(depth_image, (424, 240))
            color_cvt = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
            color_cvt_2 = cv2.resize(color_cvt, (320, 240))
            images = np.hstack((color_cvt_2, depth_colormap_resize))
            cv2.namedWindow('Color', cv2.WINDOW_AUTOSIZE)

            if Pause is True:
                # overlay a red "Pause" banner on the preview
                cv2.rectangle(images, (420, 40), (220, 160), (0, 0, 255), -1)
                font = cv2.FONT_HERSHEY_SIMPLEX
                bottomLeftCornerOfText = (220, 110)
                fontScale = 2
                fontColor = (0, 0, 0)
                lineType = 4
                cv2.putText(images, 'Pause', bottomLeftCornerOfText, font, fontScale, fontColor, lineType)

            cv2.imshow('Color', images)
            key = cv2.waitKeyEx(1)

            if Pause is True:
                # paused: only manual capture ('b' == 98 or space == 32)
                if key == 98 or key == 32:
                    take_pic.value = True
            elif Pause is False:
                # auto-capture after 15 m of travel, or on manual key
                if gps_dis(current_location, foto_location) > 15 or key == 98 or key == 32:
                    take_pic.value = True

            if take_pic.value == True:
                (color_frame_num, depth_frame_num) = Frame_num[:]

                if color_frame_num == foto_frame:
                    # camera has not delivered a new captured frame yet
                    pass
                else:
                    foto_location = (lon, lat)
                    foto_frame = color_frame_num
                    logmsg = '{},{},{},{},{},{}\n'.format(i, color_frame_num, depth_frame_num, lon, lat, date)
                    print(logmsg)
                    #print('Foto {} gemacht um {:.03},{:.04}'.format(i,lon,lat))
                    logfile.write(logmsg)
                    write_kml(lon, lat)
                    #time.sleep(0.4)
                    i += 1

            if key & 0xFF == ord('q') or key == 27:
                # quit everything
                camera_on.value = False
                gps_on.value = False
                camera_repeat.value = False
                print('Camera finish\n')
            elif key == 114 or key == 2228224:
                # 'r' (or an extended key code, presumably F5): restart camera
                camera_on.value = False
                camera_repeat.value = True
                print ('camera will restart')
            elif gps_on is False:
                # NOTE(review): compares the shared Value OBJECT to False,
                # which is never true -- probably meant gps_on.value; confirm.
                camera_repeat.value = False
            elif cv2.waitKey(1) & 0xFF == ord('p') or key == 2162688:
                # toggle pause mode
                if Pause is False:
                    print ('pause pressed')
                    Pause = True
                elif Pause is True:
                    print ('restart')
                    Pause = False
    except EOFError:
        # pipe closed by the Camera process
        pass
    finally:
        logfile.close()
        print ('end showing image')
369 |
370 |
def bag_num():
    """Return the first unused bag base name 'MMDD_NNN' for today.

    Scans NNN upwards from 001 until './bag/MMDD_NNN.bag' does not exist,
    prints the chosen name and returns it (without path or extension).
    """
    # BUG FIX: the original wrapped this loop in try/finally with
    # `return file_name` in the finally block, which silently swallowed any
    # exception (including KeyboardInterrupt) and could NameError if one
    # fired before the first assignment. The pointless time.sleep(1) that
    # delayed every startup is also gone.
    now = datetime.datetime.now()
    num = 1
    while True:
        file_name = '{:02d}{:02d}_{:03d}'.format(now.month, now.day, num)
        bag_name = './bag/{}.bag'.format(file_name)
        if os.path.isfile(bag_name):
            num += 1
        else:
            print ('current filename:{}'.format(file_name))
            return file_name
389 |
390 |
391 |
def main():
    """Entry point: prepare folders, start the GPS process, then run
    camera + preview process pairs until the user quits."""
    folder_list = ('bag', 'foto_log', 'kml')
    for folder in folder_list:
        dir_generate(folder)

    emptykml()

    # shared state between the GPS, Camera and Show_Image processes
    Location = Array('d', [0, 0])    # current (lon, lat)
    Frame_num = Array('i', [0, 0])   # (color, depth) frame nums of last shot

    take_pic = Value('i', False)     # request flag: capture one frameset
    camera_on = Value('i', True)     # cleared to stop the camera loop
    camera_repeat = Value('i', True) # set to restart camera with a new bag
    gps_on = Value('i', False)       # set once the serial port is open

    gps_process = Process(target=GPS, args=(Location, gps_on,))
    gps_process.start()

    print('Program Start')
    # each pass records into a fresh bag file; 'r' in the preview window
    # leaves camera_repeat set so a new pair is spawned
    while camera_repeat.value:
        parent_conn, child_conn = Pipe()
        bag = bag_num()
        img_process = Process(target=Show_Image,
                              args=(bag, parent_conn, take_pic, Frame_num, camera_on, camera_repeat, gps_on, Location,))
        img_process.start()
        # Camera runs in THIS process; blocks until the camera loop ends
        Camera(child_conn, take_pic, Frame_num, camera_on, bag)

    print ('terminated')

    gps_process.terminate()

    print ('all end')

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/separate functions/geotag.py:
--------------------------------------------------------------------------------
1 | import os
2 | import piexif
3 | from fractions import Fraction
4 |
def to_deg(value, loc):
    """convert decimal coordinates into degrees, munutes and seconds tuple
    Keyword arguments: value is float gps-value, loc is direction list ["S", "N"] or ["W", "E"]
    return: tuple like (25, 13, 48.343 ,'N')
    """
    # pick the hemisphere letter from the sign of the coordinate
    if value > 0:
        hemisphere = loc[1]
    elif value < 0:
        hemisphere = loc[0]
    else:
        hemisphere = ""
    magnitude = abs(value)
    degrees = int(magnitude)
    minutes_float = (magnitude - degrees) * 60
    minutes = int(minutes_float)
    seconds = round((minutes_float - minutes) * 60, 5)
    return (degrees, minutes, seconds, hemisphere)
22 |
23 |
def change_to_rational(number):
    """convert a number to rantional
    Keyword arguments: number
    return: tuple like (1, 2), (numerator, denominator)

    Goes through str() so decimal literals convert exactly
    (Fraction(str(0.1)) == 1/10, unlike Fraction(0.1)).
    """
    frac = Fraction(str(number))
    return (frac.numerator, frac.denominator)
31 |
32 |
def set_gps_location(file_name, lat, lng, altitude):
    """Adds GPS position as EXIF metadata
    Keyword arguments:
    file_name -- image file
    lat -- latitude (as float)
    lng -- longitude (as float)
    altitude -- altitude (as float)
    """
    lat_dms = to_deg(lat, ["S", "N"])
    lng_dms = to_deg(lng, ["W", "E"])

    # EXIF wants each of degrees/minutes/seconds as a rational pair
    exiv_lat = tuple(change_to_rational(part) for part in lat_dms[:3])
    exiv_lng = tuple(change_to_rational(part) for part in lng_dms[:3])

    # NOTE(review): GPSAltitudeRef 1 means "below sea level" -- confirm
    # whether 0 (above sea level) was intended here.
    gps_ifd = {
        piexif.GPSIFD.GPSVersionID: (2, 0, 0, 0),
        piexif.GPSIFD.GPSAltitudeRef: 1,
        piexif.GPSIFD.GPSAltitude: change_to_rational(round(altitude)),
        piexif.GPSIFD.GPSLatitudeRef: lat_dms[3],
        piexif.GPSIFD.GPSLatitude: exiv_lat,
        piexif.GPSIFD.GPSLongitudeRef: lng_dms[3],
        piexif.GPSIFD.GPSLongitude: exiv_lng,
    }

    # write the GPS block into the image file in place
    exif_bytes = piexif.dump({"GPS": gps_ifd})
    piexif.insert(exif_bytes, file_name)
60 |
if __name__ == '__main__':
    # Batch mode: every log file in the shp folder lists frame number plus
    # lon/lat per line; tag the matching jpg next to it.
    # NOTE(review): this block uses a Python 2 print statement while the
    # functions above are version-agnostic -- this script section is Py2.
    fotolog = r'C:\Users\cyh\Desktop\test\shp'
    project_dir = os.path.dirname(fotolog)
    png_dir = project_dir + '/jpg'
    for x in os.listdir(fotolog):
        fullpath = fotolog + '/' + x
        with open(fullpath, 'r') as log:
            for y in log:
                data = y.split(',')
                # columns: _, frame number, _, lon, lat
                num, Lon, Lat = data[1], float(data[3]), float(data[4])
                # jpg is named <logname-without-.txt>-<frame>.jpg
                jpgname = '{}/{}-{}.jpg'.format(png_dir, x[:-4], num)
                try:
                    set_gps_location(jpgname, Lat, Lon, 0)
                except IOError:
                    # missing image: report and keep going
                    print jpgname + 'not found'
                    continue
                finally:
                    pass
--------------------------------------------------------------------------------
/separate functions/jpgcreator.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | import numpy as np
3 | import cv2
4 | import os
5 | import multiprocessing as mp
6 | from itertools import islice
7 |
def color_to_jpg(input_file):
    """Create jpg files from every color frame of *input_file* (a .bag),
    written to <project>/jpg/<bagname>-<framenum>.jpg (Python 2 script).

    Frames whose jpg already exists are skipped, so the function can be
    re-run to resume an interrupted conversion.
    """
    #print input_file
    bagname = os.path.basename(input_file)
    bagdir = os.path.dirname(input_file)
    projectdir = os.path.dirname(bagdir)

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_all_streams()
    config.enable_device_from_file(input_file, False)
    profile = pipeline.start(config)
    device = profile.get_device()
    playback = device.as_playback()
    # non-real-time: deliver every recorded frame, as fast as possible
    playback.set_real_time(False)
    align_to = rs.stream.color
    align = rs.align(align_to)
    try:
        while True:
            frames = pipeline.wait_for_frames()
            color_frame = frames.get_color_frame()
            var = rs.frame.get_frame_number(color_frame)
            color_image = np.asanyarray(color_frame.get_data())
            # recorded stream is RGB; OpenCV writes BGR
            color_cvt = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
            jpg_name = '{}/jpg/{}-{}.jpg'.format(projectdir, bagname[:-4], var)
            # print jpg_name
            exist = os.path.isfile(jpg_name)
            if exist:
                # print 'jpg exist'
                pass
            else:
                print 'compressing {}'.format(jpg_name)
                cv2.imwrite((jpg_name), color_cvt, [cv2.IMWRITE_JPEG_QUALITY, 100])

    except RuntimeError:
        # wait_for_frames raises RuntimeError at end of the recording
        print 'frame covert finished for ' + input_file
        cv2.destroyAllWindows()
    finally:
        pipeline.stop()
47 |
def dir_generate(in_dir):
    """Examine if this folder exists; if not, create it (mode 0o700).

    Returns the directory path as a string.
    """
    in_dir = str(in_dir)
    if not os.path.exists(in_dir):
        try:
            os.makedirs(in_dir, 0o700)
        except OSError:
            # BUG FIX: the original used try/finally with no except, so a
            # create/exists race (e.g. two pool workers) crashed. Ignore
            # only the "already exists" case; re-raise anything else.
            if not os.path.isdir(in_dir):
                raise
    return in_dir
57 |
def main():
    """Convert every bag under the hard-coded network folder to jpgs,
    one pool worker per bag file (Python 2 script)."""
    foldername = '0617'
    bag_dir = r'X:\Mittelweser\{}\bag'.format(foldername)
    # ensure the output jpg folder exists before the workers start
    foto_dir = dir_generate(r'X:\Mittelweser\{}\jpg'.format(foldername))


    x = [os.path.join(bag_dir, bag)for bag in os.listdir(bag_dir)]
    pool = mp.Pool()
    pool.map(color_to_jpg, x)
    print 'done'

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/separate functions/live_gps.py:
--------------------------------------------------------------------------------
1 | import serial
2 | import datetime
3 | from math import sin, cos, sqrt, atan2, radians
4 |
def gps_dis(location_1, location_2):
    """Haversine distance in metres between two (lon, lat) degree pairs."""
    R_km = 6373.0

    lat1, lon1 = radians(location_1[1]), radians(location_1[0])
    lat2, lon2 = radians(location_2[1]), radians(location_2[0])

    half_dlat = (lat2 - lat1) / 2
    half_dlon = (lon2 - lon1) / 2

    a = sin(half_dlat) ** 2 + cos(lat1) * cos(lat2) * sin(half_dlon) ** 2
    c = 2 * atan2(sqrt(a), sqrt(1 - a))

    # sphere arc (km) scaled to metres
    #print "Result:", distance
    return R_km * c * 1000
23 |
def min2decimal(in_data):
    """Convert an NMEA ddmm.mmmm value to decimal degrees."""
    raw = float(in_data)
    deg = int(raw / 100)
    # remainder after the degree hundreds is minutes
    return deg + (raw - deg * 100) / 60
30 |
def GPS():
    """Standalone GPS console test (Python 2): open COM6, parse NMEA
    sentences and print the live position."""
    lon, lat = 0, 0
    print('GPS start')
    serialPort = serial.Serial()
    serialPort.port = 'COM6'
    serialPort.baudrate = 4800
    serialPort.bytesize = serial.EIGHTBITS
    serialPort.parity = serial.PARITY_NONE
    serialPort.timeout = 2
    serialPort.open()
    first_location = [0, 0]
    try:
        while True:
            line = serialPort.readline()
            data = line.split(",")
            present = datetime.datetime.now()
            date = '{},{},{},{}'.format(present.day, present.month, present.year, present.time())
            if data[0] == '$GPRMC':
                if data[2] == "A":
                    # "A" = valid fix; fields 3/5 hold lat/lon in ddmm.mmmm
                    lat = min2decimal(data[3])
                    lon = min2decimal(data[5])

                    current_location = [lon, lat]
                    print 'gps ready, current location:{}'.format(current_location)
                    gps_dis(current_location, first_location)
                    first_location = current_location
            elif data[0] == '$GPGGA':
                if data[6] == '1':
                    # fix quality 1 = GPS fix available
                    lon = min2decimal(data[4])
                    lat = min2decimal(data[2])
                    current_location = [lon, lat]
                    print 'gps ready, current location:{}'.format(current_location)
            else:
                print 'searching'
            """
            with open('./kml/live.kml', 'w') as pos:
                googleearth_message = '''

Live Point
hi im here

{},{},0

'''.format(lon, lat)
                pos.write(googleearth_message)
            """
    finally:

        print('GPS finish')

if __name__ == '__main__':
    GPS()
85 |
--------------------------------------------------------------------------------
/separate functions/measure_new.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | import numpy as np
3 | import cv2
4 | import copy
5 | import math
6 |
class ARC:
    """Interactive distance-measurement tool for a recorded RealSense .bag file.

    Plays back one frame pair from the recording, shows the color image in an
    OpenCV window and lets the user drag a line on it; the 3D distance between
    the two endpoints is computed from the aligned depth frame and drawn next
    to the line.
    """

    def __init__(self, bag=r'0626_005.bag'):
        """Open *bag* for non-real-time playback.

        The bag path used to be hard-coded; it is now a parameter with the old
        value as default, so existing callers are unaffected.
        """
        self.pipeline = rs.pipeline()

        config = rs.config()
        # repeat_playback=False: stop at end of file instead of looping.
        config.enable_device_from_file(bag, False)
        config.enable_all_streams()

        profile = self.pipeline.start(config)
        playback = profile.get_device().as_playback()
        # Deliver frames as fast as they are consumed, not at recording speed.
        playback.set_real_time(False)

    def video(self):
        """Fetch one aligned color/depth frame pair and open the measuring UI.

        The first 10 frames are discarded so auto-exposure can settle.  Only a
        single frame is processed (the loop exits via ``break`` after the
        first iteration); ``show`` then blocks until the user quits.
        """
        align = rs.align(rs.stream.color)
        for _ in range(10):
            self.pipeline.wait_for_frames()
        while True:
            frames = self.pipeline.wait_for_frames()
            aligned_frames = align.process(frames)
            color_frame = aligned_frames.get_color_frame()
            depth_frame = aligned_frames.get_depth_frame()

            # Stashed for calculate_distance(), which reads per-pixel depth.
            self.depth_frame = depth_frame
            self.color_intrin = color_frame.profile.as_video_stream_profile().intrinsics

            # Fix: the original converted the color frame to a numpy array
            # twice and built an unused colorized depth image; do each once.
            color_image = np.asanyarray(color_frame.get_data())
            color_cvt = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
            self.show(color_cvt)

            break

    def show(self, img):
        """Display *img* with the mouse-measuring callback attached.

        Blocks until the user presses 'q' or Esc.
        """
        self.img_origin = img
        self.img_copy = copy.copy(self.img_origin)
        cv2.namedWindow("Color Stream", cv2.WINDOW_AUTOSIZE)

        cv2.setMouseCallback("Color Stream", self.draw)
        while True:
            cv2.imshow("Color Stream", self.img_copy)
            key = cv2.waitKey(10)
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break

    def draw(self, event, x, y, flags, params):
        """Mouse callback: rubber-band a measuring line.

        Left press stores the anchor point, dragging previews the measurement
        on a scratch copy, release commits it to ``img_copy``, and a right
        click resets the image to the original.
        """
        img = copy.copy(self.img_copy)
        # Fix: named cv2 constants instead of magic event numbers (same values).
        if event == cv2.EVENT_LBUTTONDOWN:        # was: event == 1
            self.ix = x
            self.iy = y
        elif event == cv2.EVENT_LBUTTONUP:        # was: event == 4
            img = self.img_copy
            self.img_work(img, x, y)
        elif event == cv2.EVENT_RBUTTONDOWN:      # was: event == 2
            self.img_copy = copy.copy(self.img_origin)
        elif flags == cv2.EVENT_FLAG_LBUTTON:     # was: flags == 1 (dragging)
            self.img_work(img, x, y)
        cv2.imshow("Color Stream", img)

    def img_work(self, img, x, y):
        """Draw the line from the anchor to (x, y) with its measured length."""
        font = cv2.FONT_HERSHEY_SIMPLEX
        fontScale = 1
        fontColor = (0, 0, 0)
        lineType = 2

        ans = self.calculate_distance(x, y)
        cv2.line(img, pt1=(self.ix, self.iy), pt2=(x, y), color=(255, 255, 255), thickness=3)
        cv2.rectangle(img, (self.ix, self.iy), (self.ix + 80, self.iy - 20), (255, 255, 255), -1)
        cv2.putText(img, '{0:.5}'.format(ans), (self.ix, self.iy), font, fontScale, fontColor,
                    lineType)

    def calculate_distance(self, x, y):
        """Return the metric 3D distance between the anchor pixel and (x, y).

        Both pixels are deprojected to camera-space points using the color
        intrinsics and the per-pixel depth, then the Euclidean distance is
        taken.  NOTE(review): pixels without depth data deproject to the
        origin, which can make the result meaningless.
        """
        color_intrin = self.color_intrin
        ix, iy = self.ix, self.iy
        udist = self.depth_frame.get_distance(ix, iy)
        vdist = self.depth_frame.get_distance(x, y)

        point1 = rs.rs2_deproject_pixel_to_point(color_intrin, [ix, iy], udist)
        point2 = rs.rs2_deproject_pixel_to_point(color_intrin, [x, y], vdist)

        return math.sqrt(
            math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2) +
            math.pow(point1[2] - point2[2], 2))
107 |
108 |
if __name__ == '__main__':
    # Run the interactive measuring tool on the default bag recording.
    ARC().video()
111 |
--------------------------------------------------------------------------------
/separate functions/opencv_find_key.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import sys
3 |
# Developer utility (Python 2): display an image and print the code of every
# key pressed, so platform-specific cv2.waitKeyEx codes can be discovered.
# NOTE(review): there is no exit key -- the loop runs until the process is killed.
img = cv2.imread('1.PNG')

while True:
    cv2.imshow('img',img)

    # waitKeyEx(0) blocks until a key is pressed and returns the full key code
    # (including extended codes such as arrow and function keys).
    res = cv2.waitKeyEx(0)
    print 'You pressed %d (0x%x), LSB: %d (%s)' % (res, res, res % 256,
repr(chr(res%256)) if res%256 < 128 else '?')
12 |
--------------------------------------------------------------------------------
/separate functions/phase 1/Hello World.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | # Import Numpy for easy array manipulation
3 | import numpy as np
4 | # Import OpenCV for easy image rendering
5 | import cv2
6 |
7 | # Configure depth and color streams
# "Hello world" for the RealSense SDK: stream live depth + color at 640x480
# and continuously print the distance (in metres) at the image centre.

# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
profile = pipeline.start(config)

# Skip the first frames so auto-exposure can settle.
for x in range(5):
    pipeline.wait_for_frames()
try:
    while True:
        frame = pipeline.wait_for_frames()
        depth_frame = frame.get_depth_frame()
        color_frame = frame.get_color_frame()
        if not depth_frame or not color_frame:
            continue
        width = depth_frame.get_width()
        height = depth_frame.get_height()
        # Fix: floor division -- get_distance needs integer pixel coordinates
        # and "/" yields a float on Python 3 (identical result on Python 2).
        dist = depth_frame.get_distance(width // 2, height // 2)
        print('distance is ' + str(dist))
        # Press esc or 'q' to close the image window
        # NOTE(review): no window is ever created, so cv2.waitKey always
        # returns -1 and this break is unreachable; stop with Ctrl+C.
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q') or key == 27:
            break

finally:
    # Stop streaming
    pipeline.stop()
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/separate functions/phase 1/color_to_png.py:
--------------------------------------------------------------------------------
1 | # First import library
2 | import pyrealsense2 as rs
3 | # Import Numpy for easy array manipulation
4 | import numpy as np
5 | # Import OpenCV for easy image rendering
6 | import cv2
7 |
8 | # Import os.path for file path manipulation
9 | import os.path
10 |
def dir_generate(in_dir):
    """Ensure directory *in_dir* exists and return its path as a string.

    The directory (including missing parents) is created with mode 0o700.
    Creation errors are ignored only when the directory already exists --
    e.g. another process created it between the existence check and
    makedirs -- so genuine failures (permissions, bad path) still raise.
    The original ``try/finally: pass`` suppressed nothing and left that
    race unhandled.
    """
    in_dir = str(in_dir)
    if not os.path.exists(in_dir):
        try:
            os.makedirs(in_dir, 0o700)
        except OSError:
            # Re-raise unless the directory now exists (lost a creation race).
            if not os.path.isdir(in_dir):
                raise
    return in_dir
20 |
def depth_to_png(input_file):
    """Colorize every depth frame of *input_file* (a .bag) and save it as PNG.

    Output goes to <project>/png/<bagname>-<frame>-D.png, two levels up from
    the bag.  Frames are previewed in an OpenCV window while converting;
    pressing Esc aborts early.  The SDK signals end-of-bag by raising
    RuntimeError, which is treated as normal completion.
    """
    bag_dir = os.path.dirname(input_file)
    project_dir = os.path.dirname(bag_dir)
    png_dir = dir_generate(project_dir + '/png')
    filenum = os.path.splitext(os.path.split(input_file)[1])[0]
    # Created before the try so the finally clause can always reach it.
    pipeline = rs.pipeline()
    started = False
    try:
        config = rs.config()
        config.enable_all_streams()
        # repeat_playback=False: raise RuntimeError at end of file, don't loop.
        config.enable_device_from_file(input_file, False)
        profile = pipeline.start(config)
        started = True
        playback = profile.get_device().as_playback()
        playback.set_real_time(False)
        align = rs.align(rs.stream.color)

        while True:
            frames = pipeline.wait_for_frames()
            aligned_frames = align.process(frames)
            depth_frame = aligned_frames.get_depth_frame()
            depth_color = rs.colorizer().colorize(depth_frame)
            depth_color_image = np.asanyarray(depth_color.get_data())
            var = rs.frame.get_frame_number(depth_frame)
            cv2.namedWindow("Depth Stream", cv2.WINDOW_AUTOSIZE)
            cv2.imshow("Depth Stream", depth_color_image)
            print('compressing {}-{}.png'.format(filenum, str(var)))
            cv2.imwrite('{}/{}-{}-D.png'.format(png_dir, filenum, str(var)),
                        depth_color_image, [cv2.IMWRITE_PNG_COMPRESSION, 1])
            key = cv2.waitKey(100)
            # if pressed escape exit program
            if key == 27:
                cv2.destroyAllWindows()
                break

    except RuntimeError:
        # End of bag: no more frames arrive.
        print('frame covert finished for ' + input_file)
        cv2.destroyAllWindows()
    finally:
        # Fix: the original never stopped the pipeline, leaking the playback
        # device on every call (the sibling color_to_png does stop it).
        if started:
            pipeline.stop()
62 |
63 |
def color_to_png(file_name):
    """Save every color frame of *file_name* (a .bag) as PNG.

    Output goes to <project>/png/<bagname>_c_<frame>.png, two levels up from
    the bag.  Frames are previewed in an OpenCV window; pressing Esc aborts
    early.  End of bag is signalled by the SDK as RuntimeError and treated as
    normal completion.
    """
    bag_dir = os.path.dirname(file_name)
    project_dir = os.path.dirname(bag_dir)
    png_dir = dir_generate(project_dir + '/png')
    filenum = os.path.splitext(os.path.split(file_name)[1])[0]
    # Created before the try so the finally clause can always reach it.
    pipeline = rs.pipeline()
    started = False
    try:
        config = rs.config()
        config.enable_all_streams()
        # repeat_playback=False: raise RuntimeError at end of file, don't loop.
        config.enable_device_from_file(file_name, False)
        profile = pipeline.start(config)
        started = True
        playback = profile.get_device().as_playback()
        playback.set_real_time(False)
        while True:
            frames = pipeline.wait_for_frames()
            # keep() stops the SDK from recycling the frame memory while we
            # still hold references into it.
            frames.keep()
            if frames:
                color_frame = frames.get_color_frame()
                var = rs.frame.get_frame_number(color_frame)
                print('frame number: ' + str(var))
                color_image = np.asanyarray(color_frame.get_data())
                color_cvt = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)

                cv2.namedWindow("Color Stream", cv2.WINDOW_AUTOSIZE)
                cv2.imshow("Color Stream", color_cvt)
                cv2.imwrite('{}/{}_c_{}.png'.format(png_dir, filenum, var), color_cvt)
                print('{}/{}_c_{}.png'.format(png_dir, filenum, var))
                key = cv2.waitKey(1)
                # if pressed escape exit program
                if key == 27:
                    cv2.destroyAllWindows()
                    break
    except RuntimeError:
        print("No more frames arrived, reached end of BAG file!")
        cv2.destroyAllWindows()
    finally:
        # Fix: only stop when start() succeeded, so a failed start does not
        # raise a confusing secondary error here.  Also dropped the unused
        # time_color variable the original computed per frame.
        if started:
            pipeline.stop()
105 |
def main():
    """Prompt for a bag file path and convert its color and depth frames to PNG."""
    # raw_input: this script targets Python 2.
    file_name = raw_input('file name: \n')
    color_to_png(file_name)
    depth_to_png(file_name)

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/separate functions/phase 1/depth to png.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pyrealsense2 as rs
3 | import numpy as np
4 | import cv2
5 |
def color_to_png(input_file):
    """Colorize the depth stream of ./bag/<input_file> and save each frame as PNG.

    NOTE(review): despite its name (and the "Color Stream" window title) this
    function converts and saves *depth* frames; output goes to
    ./png/D-<bagname>-<framenumber>.png with the *color* frame number in the
    name.  Python 2 script.
    """
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_all_streams()
    # repeat_playback=False: stop at end of file instead of looping.
    config.enable_device_from_file('./bag/'+input_file,False)
    profile = pipeline.start(config)
    device = profile.get_device()
    playback = device.as_playback()
    # Deliver frames as fast as they are consumed, not at recording speed.
    playback.set_real_time(False)
    align_to = rs.stream.color
    align = rs.align(align_to)
    try:
        while True:
            frames = pipeline.wait_for_frames()
            aligned_frames = align.process(frames)
            depth_frame = aligned_frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            depth_color = rs.colorizer().colorize(depth_frame)
            depth_color_image = np.asanyarray(depth_color.get_data())
            # File names use the color frame number, not the depth one.
            var = rs.frame.get_frame_number(color_frame)
            color_image = np.asanyarray(color_frame.get_data())
            color_cvt = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
            cv2.namedWindow("Color Stream", cv2.WINDOW_AUTOSIZE)
            cv2.imshow("Color Stream", depth_color_image)
            print 'compressing {}-{}.png'.format(input_file[:-4],str(var))
            cv2.imwrite('./png/D-{}-{}.png'.format(input_file[:-4],str(var)), depth_color_image,[cv2.IMWRITE_PNG_COMPRESSION,1])
            key = cv2.waitKey(100)
            # if pressed escape exit program
            if key == 27:
                cv2.destroyAllWindows()
                break

    except RuntimeError:
        # End of bag: the SDK raises RuntimeError when no more frames arrive.
        print 'frame covert finished for '+input_file
        cv2.destroyAllWindows()
    finally:
        # NOTE(review): the pipeline is never stopped -- leaks one per call.
        pass
43 |
44 |
# Convert every recording found in ./bag, announcing each file as we go.
dir_name = 'bag'
for bag_file in os.listdir(dir_name):
    print(bag_file)
    color_to_png(bag_file)
49 |
--------------------------------------------------------------------------------
/separate functions/phase 1/depth_to_png.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | # Import Numpy for easy array manipulation
3 | import numpy as np
4 | # Import OpenCV for easy image rendering
5 | import cv2
6 | # Import argparse for command-line options
7 | import argparse
8 | # Import os.path for file path manipulation
9 | import os.path
10 |
11 |
try:
    # raw_input: Python 2 script.  Prompt for a bag name (".bag" added below).
    file_name = raw_input('file name: \n')
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color,1920,1080,rs.format.rgba8,30)
    config.enable_device_from_file(file_name + '.bag')
    profile = pipeline.start(config)
    i = 0
    # NOTE(review): the names are swapped -- "align" holds the stream id and
    # "align_to" holds the rs.align processor; 'i' above is never used.
    align = rs.stream.color
    align_to = rs.align(align)
    # Streaming loop
    while True:
        frames = pipeline.wait_for_frames()
        frames = align_to.process(frames)
        if frames:
            depth_frame = frames.get_depth_frame()
            var_d = rs.frame.get_frame_number(depth_frame)
            print 'frame number d: '+ str(var_d)
            # Colorize the 16-bit depth map so it can be saved as a normal PNG.
            depth_color_frame = rs.colorizer().colorize(depth_frame)
            depth_color_image = np.asanyarray(depth_color_frame.get_data())
            cv2.imwrite(file_name +'_d_'+ str(var_d)+'.png',depth_color_image)
            cv2.imshow("Depth Stream", depth_color_image)
            key = cv2.waitKey(1)
            # if pressed escape exit program
            if key == 27:
                cv2.destroyAllWindows()
                break
except RuntimeError:
    # End of bag playback is reported as RuntimeError by the SDK.
    print "No more frames arrived, reached end of BAG file!"
finally:
    # NOTE(review): pipeline.stop() is never called.
    pass
44 |
--------------------------------------------------------------------------------
/separate functions/phase 1/manual_photo.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | import numpy as np
3 | from matplotlib import pyplot as plt
4 | import cv2
5 | import time
6 | import sys
7 |
# Manual photo capture (Python 2): stream live with the recorder paused and
# write a ~10-frame burst into 123.bag every time the user presses 'q'.
file_name = '123.bag' #raw_input('Wegnummer: \n')
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 6)
config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgba8, 6)
config.enable_record_to_file(file_name)
profile = pipeline.start(config)

device = profile.get_device()
recorder = device.as_recorder()

# Select depth visual preset 4 (device-specific; its name is printed below).
depth_sensor = device.first_depth_sensor()
depth_sensor.set_option(rs.option.visual_preset, 4)
dev_range = depth_sensor.get_option_range(rs.option.visual_preset)
preset_name = depth_sensor.get_option_value_description(rs.option.visual_preset, 4)
print preset_name

# Start paused: nothing is written to the bag until 'q' resumes the recorder.
pause = rs.recorder.pause(recorder)
i=1
try:
    while True :
        #print 'frame: ' + str(i)
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        if not depth_frame or not color_frame:
            continue

        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        color_cvt = cv2.cvtColor(color_image,cv2.COLOR_RGB2BGR)

        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

        # Stack both images horizontally


        # Show images (the depth preview is commented out)
        #cv2.namedWindow('depth',cv2.WINDOW_FULLSCREEN)
        cv2.namedWindow('Color', cv2.WINDOW_FULLSCREEN)
        #cv2.imshow('depth',depth_colormap)
        cv2.imshow('Color', color_cvt)
        cv2.waitKey(1)
        key = cv2.waitKey(1)
        i +=1
        # 'q': resume recording, let ~10 framesets through, then pause again.
        if key & 0xFF == ord('q') :
            print i
            resume = rs.recorder.resume(recorder)
            for b in range(10):
                frames = pipeline.wait_for_frames()
                depth_frame = frames.get_depth_frame()
                color_frame = frames.get_color_frame()

            pause = rs.recorder.pause(recorder)

            continue
        # Esc: close the window and stop.
        if key == 27:
            cv2.destroyAllWindows()
            break

finally:

    # Stop streaming
    pipeline.stop()
--------------------------------------------------------------------------------
/separate functions/phase 1/matchframelist.py:
--------------------------------------------------------------------------------
1 |
2 | depth_frame_list = []
3 | color_frame_list = []
4 | match_frame_list = []
5 | with open('colorlist.txt','r') as csvfile:
6 | for line in csvfile:
7 | frame = [elt.strip() for elt in line.split(',')]
8 | color_frame_list.append(frame)
9 |
10 | with open('depthlist.txt','r') as depcsv:
11 | for dline in depcsv:
12 | frame_d = [dd.strip() for dd in dline.split(',')]
13 | depth_frame_list.append(frame_d)
14 | print depth_frame_list
15 | csvfile.close()
16 | depcsv.close()
17 | f_list = []
18 |
19 | for t_c in color_frame_list:
20 | for t_d in depth_frame_list:
21 | gap = float(t_c[1]) - float(t_d[1])
22 | gap = abs(gap)
23 | if gap <20:
24 | #print gap
25 | frame_match = t_c[0],t_d[0]
26 | #print frame_match
27 | f_list.append(str(t_c[0]) + ','+str(t_d[0]) + '\n')
28 |
29 |
30 | print f_list
31 | unique_list = []
32 | with open('matched.txt','w') as matched:
33 | for elem in f_list:
34 | if elem not in unique_list:
35 | print elem
36 | unique_list.append(elem)
37 | matched.write(elem)
38 |
39 | matched.close()
40 | print unique_list
41 |
42 |
43 |
--------------------------------------------------------------------------------
/separate functions/phase 1/measure.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | # Import Numpy for easy array manipulation
3 | import numpy as np
4 | # Import OpenCV for easy image rendering
5 | import cv2
6 | import sys
7 | from matplotlib import pyplot as plt
8 | from matplotlib.widgets2 import Ruler
9 | import os
10 |
11 |
# Interactive measuring on pre-matched frame pairs (Python 2 script).
#
# Reads ./list/<num>_matched.txt, whose lines are "color_frame,depth_frame"
# pairs from the frame-matching step, steps through the bag until each listed
# depth frame appears, grabs the matching color frame and opens a matplotlib
# window with the Ruler widget for click-and-drag measuring.
num = raw_input('Wegenummer:\n')
file_name = './bag/{}.bag'.format(num)
pipeline = rs.pipeline()
config = rs.config()
config.enable_device_from_file(file_name)
config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgba8, 30)
profile = pipeline.start(config)
device = profile.get_device()
playback = device.as_playback()
# Deliver frames as fast as they are consumed, not at recording speed.
playback.set_real_time(False)
align_to = rs.stream.color
align = rs.align(align_to)
frame_list = []

# Each entry is [color_frame_number, depth_frame_number].
with open('./list/{}_matched.txt'.format(num),'r') as csvfile:
    for line in csvfile:
        frame = [elt.strip() for elt in line.split(',')]
        frame_list.append(frame)
i=0
try:
    while True:
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)
        depth_frame = aligned_frames.get_depth_frame()
        vard = rs.frame.get_frame_number(depth_frame)
        # Skip ahead until the next listed depth frame number shows up.
        if int(vard) == int(frame_list[i][1]) :
            try:
                while True:
                    # Keep pulling frames until the matching color frame arrives.
                    new_frames = pipeline.wait_for_frames()
                    color_frame = new_frames.get_color_frame()
                    var = rs.frame.get_frame_number(color_frame)
                    if int(var) ==int(frame_list[i][0]):
                        depth_color = rs.colorizer().colorize(depth_frame)
                        depth_color_image = np.asanyarray(depth_color.get_data())
                        color_image = np.asanyarray(color_frame.get_data())

                        # Intrinsics let the Ruler deproject pixels to 3D points.
                        color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
                        color_cvt = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
                        print var, vard
                        # NOTE(review): img_over and color_cvt are computed but
                        # never displayed; plt.imshow uses color_image below.
                        img_over = cv2.addWeighted(color_cvt, 1, depth_color_image, 0.3, 0.5)
                        key = cv2.waitKey(0)

                        # xCoord/yCoord are leftovers from a plotting example.
                        xCoord = np.arange(0, 6, 1)
                        yCoord = [0, 1, -3, 5, -3, 0]
                        fig = plt.figure()
                        ax = fig.add_subplot(111)
                        number = 'frame number: c:{}/d:{}'.format(var,vard)
                        markerprops = dict(marker='o', markersize=5, markeredgecolor='red')
                        lineprops = dict(color='red', linewidth=2)
                        plt.imshow(color_image)
                        ax.grid(False)
                        plt.text(500, -20, number, fontsize=15)
                        # Windows-specific: maximize the matplotlib window.
                        figManager = plt.get_current_fig_manager().window.state('zoomed')

                        ruler = Ruler(ax=ax,
                                      depth_frame=depth_frame,
                                      color_intrin=color_intrin,
                                      useblit=True,
                                      markerprops=markerprops,
                                      lineprops=lineprops,
                                      )

                        # Blocks until the measuring window is closed.
                        plt.show()
                        # if pressed escape exit program
                        if key == 27:
                            cv2.destroyAllWindows()
                            break
                    else:
                        cv2.destroyAllWindows()
                        i+= 1
                        break

            finally:
                pass

        else:
            pass

except IndexError:
    # Ran past the end of frame_list: every matched pair was handled.
    print 'file ended'


finally:
    print 'finish'
98 |
99 |
100 |
--------------------------------------------------------------------------------
/separate functions/phase 1/measure_loop.py:
--------------------------------------------------------------------------------
1 | # First import library
2 | import pyrealsense2 as rs
3 | # Import Numpy for easy array manipulation
4 | import numpy as np
5 | # Import OpenCV for easy image rendering
6 | import cv2
7 | # Import os.path for file path manipulation
8 | import os.path
9 | from matplotlib import pyplot as plt
10 | from matplotlib.widgets2 import Ruler
11 |
12 | from datetime import datetime
13 |
14 |
# Frame-by-frame measuring loop over record.bag (Python 2 script).
#
# Each round resumes playback, skips 10 framesets, pauses again, and shows the
# colorized depth image in matplotlib with the Ruler widget attached; closing
# the window advances to the next round (at most 99 rounds).
file_name = 'record.bag'
print file_name
# Configure depth and color streamsq
pipeline = rs.pipeline()
config = rs.config()
config.enable_device_from_file(file_name)

# Start streaming from file
profile = pipeline.start(config)
device = profile.get_device()
playback = device.as_playback()
playback.set_real_time(False)

print 'profile got'
#align to color map
align_to = rs.stream.color
align = rs.align(align_to)
print 'aligned'

i = 1
while i < 100:
    print i
    # Play a short burst so the next frameset is fresh, then pause again.
    playback.resume()
    for x in range(10):
        pipeline.wait_for_frames()

    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    playback.pause()
    var = rs.frame.get_frame_number(color_frame)
    print 'frame number: '+ str(var)
    time_stamp = rs.frame.get_timestamp(color_frame)
    time = datetime.now()
    print 'timestampe: ' + str(time_stamp)
    domain = rs.frame.get_frame_timestamp_domain(color_frame)
    print domain
    # NOTE(review): get_data returns the raw pixel buffer, not metadata.
    meta = rs.frame.get_data(color_frame)
    print 'metadata: ' + str(meta)

    depth_stream = profile.get_stream(rs.stream.depth)
    inst = rs.video_stream_profile.intrinsics
    # get intrinsics of the frames
    depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
    color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
    depth_to_color_extrin = depth_frame.profile.get_extrinsics_to(color_frame.profile)
    print 'instrincts got'

    # Convert images to numpy arrays
    depth_color = rs.colorizer().colorize(depth_frame)
    depth_image = np.asanyarray(depth_color.get_data())
    color_image = np.asanyarray(color_frame.get_data())
    print 'got image'

    # xCoord/yCoord are leftovers from a plotting example and are unused.
    xCoord = np.arange(0, 6, 1)
    yCoord = [0, 1, -3, 5, -3, 0]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    markerprops = dict(marker='o', markersize=5, markeredgecolor='red')
    lineprops = dict(color='red', linewidth=2)
    plt.imshow(depth_image)
    ax.grid(True)
    # Windows-specific: maximize the matplotlib window.
    figManager = plt.get_current_fig_manager().window.state('zoomed')



    ruler = Ruler(ax=ax,
                  depth_frame=depth_frame,
                  color_intrin=color_intrin,
                  useblit=True,
                  markerprops=markerprops,
                  lineprops=lineprops,
                  )

    # Blocks until the measuring window is closed.
    plt.show()


    cv2.waitKey(1000)
    cv2.destroyAllWindows()

    i += 1
96 |
97 |
98 |
99 |
100 |
101 |
102 |
--------------------------------------------------------------------------------
/separate functions/phase 1/measure_loop_temp.py:
--------------------------------------------------------------------------------
1 | #import library needed
2 | import pyrealsense2 as rs
3 | import numpy as np
4 | import cv2
5 | import math
6 | import sys
7 | from matplotlib import pyplot as plt
8 | from matplotlib.widgets2 import Ruler
9 |
def main():
    """Unused entry point; the __main__ guard below drives the script instead."""
    question()
12 |
def measure(input):
    """Interactively measure distances on frames of <input>.bag (Python 2).

    Plays the recording back (non-real-time), post-processes each depth frame
    with the standard RealSense filter chain, shows the colorized depth image
    in matplotlib and attaches the Ruler widget for click-and-drag measuring.
    After each frame the user types '+' for the next frame or 'x' to stop;
    afterwards question() is asked for a further file to process.
    """
    file_name = input + '.bag'
    print(file_name)
    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_device_from_file(file_name)
    # Start streaming from file
    profile = pipeline.start(config)
    device = profile.get_device()
    playback = device.as_playback()
    playback.set_real_time(False)
    print('profile got')
    # Depth post-processing chain: decimate -> disparity -> spatial ->
    # temporal -> hole-filling -> back to depth.
    dec = rs.decimation_filter(1)
    to_dasparity = rs.disparity_transform(True)
    dasparity_to = rs.disparity_transform(False)
    spat = rs.spatial_filter()
    hole = rs.hole_filling_filter(2)
    temp = rs.temporal_filter()

    i = 1

    # NOTE(review): i is never incremented, so only 'x' ends this loop.
    while i < 10:
        align_to = rs.stream.color
        align = rs.align(align_to)
        print('aligned')
        playback.resume()
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)
        playback.pause()

        # Fix: use the *aligned* frames consistently.  The original fetched
        # the aligned depth frame and immediately overwrote it with the raw
        # one, so measurements did not line up with the color image (the
        # corrected measure_loop_temp_0405.py sibling does it this way).
        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()
        depth = dec.process(depth_frame)
        depth_dis = to_dasparity.process(depth)
        depth_spat = spat.process(depth_dis)
        depth_temp = temp.process(depth_spat)
        depth_hole = hole.process(depth_temp)
        depth_final = dasparity_to.process(depth_hole)
        depth_color = rs.colorizer().colorize(depth_final)
        # Intrinsics let the Ruler deproject pixels to 3D points.
        color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
        print('instrincts got')
        var = rs.frame.get_frame_number(color_frame)
        print('frame number: ' + str(var))

        # turn into numpy array
        depth_image = np.asanyarray(depth_color.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        print('got image')

        fig = plt.figure()
        ax = fig.add_subplot(111)
        markerprops = dict(marker='o', markersize=5, markeredgecolor='red')
        lineprops = dict(color='red', linewidth=2)
        plt.imshow(depth_image)
        ax.grid(True)
        # Windows-specific: maximize the matplotlib window.
        figManager = plt.get_current_fig_manager().window.state('zoomed')

        ruler = Ruler(ax=ax,
                      depth_frame=depth_frame,
                      color_intrin=color_intrin,
                      useblit=True,
                      markerprops=markerprops,
                      lineprops=lineprops,
                      )

        # Blocks until the measuring window is closed.
        plt.show()
        print('show image')
        decision = raw_input('next move: \n')
        if decision == '+':
            continue
        elif decision == 'x':
            i = 100
            print('stop')
            break
    # Fix: the original discarded question()'s answer; ask for the next file
    # and process it when one was given.
    next_file = question()
    if next_file:
        measure(next_file)
113 |
def question():
    """Ask whether more images should be processed (Python 2, raw_input).

    Returns the file name entered after a 'y' answer, or None after 'n'.
    Any other answer re-prompts; the original fell through returning None,
    which made the caller crash on None + '.bag'.
    """
    x = raw_input('haben Sie noch bilder?(y/n) \n')
    if str(x) == 'y':
        file_name = raw_input('filename: \n')
        return file_name
    elif str(x) == 'n':
        print('exit')
        return None
    else:
        print('Falsche Antwort')
        # Fix: re-prompt and propagate the eventual answer.
        return question()
127 |
if __name__ == '__main__':
    # Prompt for the first bag file, then start measuring on it.
    in_file = question()
    measure(in_file)
131 |
132 |
--------------------------------------------------------------------------------
/separate functions/phase 1/measure_loop_temp_0405.py:
--------------------------------------------------------------------------------
1 | #import library needed
2 | import pyrealsense2 as rs
3 | import numpy as np
4 | import cv2
5 | import math
6 | import sys
7 | from matplotlib import pyplot as plt
8 | from matplotlib.widgets2 import Ruler
9 |
def main():
    """Unused entry point; the __main__ guard below drives the script instead."""
    question()
12 |
def measure(input):
    """Interactively measure distances on frames of <input>.bag (Python 2).

    Plays the bag back, aligns depth to color, runs the depth through the
    standard filter chain and shows each color frame in matplotlib with the
    Ruler widget attached.  After every frame the user types '+' for the next
    frame or 'x' to stop; it then tail-calls question()/measure() to process
    further files.
    """
    file_name = input+'.bag'
    print file_name
    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_device_from_file(file_name)
    # Start streaming from file
    profile = pipeline.start(config)
    device = profile.get_device()
    playback = device.as_playback()
    playback.set_real_time(False)
    print 'profile got'
    # align to color map
    # Define filters

    dec = rs.decimation_filter(1)
    to_dasparity = rs.disparity_transform(True)
    dasparity_to = rs.disparity_transform(False)
    spat = rs.spatial_filter()
    hole = rs.hole_filling_filter(2)
    temp = rs.temporal_filter()


    i = 1

    # NOTE(review): i is never incremented, so only 'x' ends this loop.
    while i < 10:
        align_to = rs.stream.color
        align = rs.align(align_to)
        print 'aligned'
        #playback.resume()
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)
        #playback.pause()

        depth_frame = aligned_frames.get_depth_frame()
        #depth_frame = frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()
        #color_frame = frames.get_color_frame()
        # Depth post-processing chain: decimate -> disparity -> spatial ->
        # temporal -> hole-filling -> back to depth, then colorize.
        depth = dec.process(depth_frame)
        depth_dis = to_dasparity.process(depth)
        depth_spat = spat.process(depth_dis)
        depth_temp = temp.process(depth_spat)
        depth_hole = hole.process(depth_temp)
        depth_final = dasparity_to.process(depth_hole)
        depth_color = rs.colorizer().colorize(depth_final)
        #depth_stream = profile.get_stream(rs.stream.depth)
        #inst = rs.video_stream_profile.intrinsics
        # get intrinsics of the frames
        #depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
        color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
        #depth_to_color_extrin = depth_frame.profile.get_extrinsics_to(color_frame.profile)
        print 'instrincts got'
        var = rs.frame.get_frame_number(color_frame)
        print 'frame number: ' + str(var)


        # turn into numpy array
        depth_image = np.asanyarray(depth_color.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        print 'got image'
        #cv2.namedWindow('Align Example', cv2.WINDOW_AUTOSIZE)
        #cv2.imshow('Align Example', depth_image)
        #key = cv2.waitKey(1)

        #cv2.namedWindow('Depth', cv2.WINDOW_FULLSCREEN)
        #cv2.imshow('Depth', color_image)
        #key = cv2.waitKey(1)



        #cv2.namedWindow('Depth', cv2.WINDOW_FULLSCREEN)
        #cv2.imshow('Depth', depth_image)
        #key = cv2.waitKey(0)

        #if key == 27:
        #print 'stop'
        #cv2.destroyAllWindows()
        #break

        # xCoord/yCoord are leftovers from a plotting example and are unused.
        xCoord = np.arange(0, 6, 1)
        yCoord = [0, 1, -3, 5, -3, 0]
        fig = plt.figure()
        ax = fig.add_subplot(111)

        markerprops = dict(marker='o', markersize=5, markeredgecolor='red')
        lineprops = dict(color='red', linewidth=2)
        plt.imshow(color_image)
        ax.grid(True)
        plt.text(500,-20,'frame no.: '+ str(var),fontsize=15)
        # Windows-specific: maximize the matplotlib window.
        figManager = plt.get_current_fig_manager().window.state('zoomed')

        ruler = Ruler(ax=ax,
                      depth_frame=depth_frame,
                      color_intrin=color_intrin,
                      useblit=True,
                      markerprops=markerprops,
                      lineprops=lineprops,
                      )


        # Blocks until the measuring window is closed.
        plt.show()
        print 'show image'

        decision = raw_input('next move: \n')
        if decision == '+':
            continue
        elif decision == 'x':
            i = 100
            print 'stop'
            break
    print 'done'
    # Tail-recurse into the prompt for the next file.
    in_file = question()
    x = measure(in_file)
127 |
128 |
129 |
def question():
    """Ask whether more images should be processed (Python 2, raw_input).

    Returns the file name entered after a 'y' answer; exits the process
    after 'n'.  Any other answer re-prompts.
    """
    x = raw_input('haben Sie noch bilder?(y/n) \n')
    if str(x) == 'y':
        file_name = raw_input('filename: \n')
        return file_name
    elif str(x) == 'n':
        print('exit')
        sys.exit()
    else:
        print('Falsche Antwort')
        # Fix: the original dropped the recursive call's result, so a typo
        # followed by a valid file name still returned None to the caller.
        return question()
145 |
if __name__ == '__main__':
    # Prompt for the first bag file; measure() chains further prompts itself.
    in_file = question()
    x = measure(in_file)
    print x
150 |
151 |
--------------------------------------------------------------------------------
/separate functions/phase 1/measure_read_per_frame.py:
--------------------------------------------------------------------------------
1 | #import library needed
2 | import pyrealsense2 as rs
3 | import numpy as np
4 | import cv2
5 | import math
6 | import sys
7 |
8 | #message box
9 | import ctypes
10 | from ctypes import c_int, WINFUNCTYPE, windll
11 | from ctypes.wintypes import HWND, LPCWSTR, UINT
12 | prototype = WINFUNCTYPE(c_int, HWND, LPCWSTR, LPCWSTR, UINT)
13 | paramflags = (1, "hwnd", 0), (1, "text", "Hi"), (1, "caption", "Result"), (1, "flags", 0)
14 | MessageBox = prototype(("MessageBoxW", windll.user32), paramflags)
15 |
16 | file_name = raw_input('name of the file: \n')+ '.bag'
17 | print file_name
18 | # Configure depth and color streams
19 | pipeline = rs.pipeline()
20 | config = rs.config()
21 | config.enable_device_from_file(file_name)
22 | config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
23 | config.enable_stream(rs.stream.color, 1920, 1080, rs.format.rgba8, 30)
24 | # Start streaming from file
25 | profile = pipeline.start(config)
26 |
27 | # set up sensor
28 | dev = profile.get_device()
29 | dev.as_playback().set_real_time(False)
30 | depth_sensor = dev.first_depth_sensor()
31 | cam_range = depth_sensor.get_option_range(rs.option.visual_preset)
32 | preset_name = depth_sensor.get_option_value_description(rs.option.visual_preset, 4)
33 | print 'sensor set'
34 | #align to color map
35 | align_to = rs.stream.color
36 | align = rs.align(align_to)
37 | print 'aligned'
38 | # Skip 5 first frames to give the Auto-Exposure time to adjust
39 | for x in range(5):
40 | pipeline.wait_for_frames()
41 |
42 | # Streaming loop
43 | try:
44 | while True:
45 | # Get frameset of depth
46 | frames = pipeline.wait_for_frames()
47 | aligned_frames = align.process(frames)
48 | depth_frame = aligned_frames.get_depth_frame()
49 | color_frame = frames.get_color_frame()
50 | var = rs.frame.get_frame_number(color_frame)
51 | print var
52 | #colorize depth frame
53 | depth_color = rs.colorizer().colorize(depth_frame)
54 | print 'frame colorized'
55 |
56 | # get depth stream data
57 | depth_stream = profile.get_stream(rs.stream.depth)
58 | inst = rs.video_stream_profile.intrinsics
59 | #get intrinsics of the frames
60 | depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
61 | color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
62 | depth_to_color_extrin = depth_frame.profile.get_extrinsics_to(color_frame.profile)
63 | print 'instrincts got'
64 |
65 | # Convert images to numpy arrays
66 | depth_image = np.asanyarray(depth_color.get_data())
67 | color_image = np.asanyarray(color_frame.get_data())
68 | print 'got image'
69 |
70 |
def calculate_3D (xi, yi, x0, y0):
    """Measure the 3D distance (in cm) between two clicked pixels.

    Uses the module-level `depth_frame` and `color_intrin` from the
    current streaming iteration; shows the result in a Win32 MessageBox.
    (xi, yi) is the mouse-down pixel, (x0, y0) the mouse-up pixel.
    """
    # Depth (metres) at each of the two pixels.
    udist = depth_frame.get_distance(xi, yi)
    vdist = depth_frame.get_distance(x0, y0)
    print udist, vdist
    # Deproject both pixels into 3D camera-space points.
    # NOTE(review): deprojection uses the *color* intrinsics on an
    # aligned depth frame -- confirm the frames are aligned to color.
    point1 = rs.rs2_deproject_pixel_to_point(color_intrin, [xi, yi], udist)
    point2 = rs.rs2_deproject_pixel_to_point(color_intrin, [x0, y0], vdist)
    print 'start(x,y,z): '+ str(point1)+'\n' + 'end(x,y,z): ' +str(point2)
    # Euclidean distance between the two 3D points (metres).
    dist = math.sqrt(
        math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1],2) + math.pow(
            point1[2] - point2[2], 2))
    cm = dist * 100
    decimal2 = "%.2f" % cm
    print 'Vermessung: ' + str(decimal2)+ 'cm'
    MessageBox(text='Vermessung: ' + decimal2 + ' cm')
85 |
def measure (event,x,y,flags,param):
    """OpenCV mouse callback: record a click-drag and measure it.

    Mouse-down stores the start pixel in globals (xi, yi); mouse-up
    stores the end pixel (x0, y0) and calls calculate_3D on the pair.
    Return values are ignored by OpenCV; kept for interactive use only.
    """
    global xi,yi,x0,y0
    if event == cv2.EVENT_LBUTTONDOWN:
        xi,yi = x,y
        print 'xi,yi= ' + str(xi) + ',' + str(yi)
        return xi,yi

    elif event == cv2.EVENT_LBUTTONUP:
        x0,y0 = x,y
        print 'x0,y0= ' + str(x0) + ',' + str(y0)
        calculate_3D(xi, yi, x0, y0)
        return x0,y0
98 |
99 | print 'show window'
100 | cv2.namedWindow('Color', cv2.WINDOW_AUTOSIZE)
101 | cv2.namedWindow('Depth', cv2.WINDOW_AUTOSIZE)
102 | cv2.setMouseCallback('Color', measure)
103 | cv2.setMouseCallback('Depth', measure)
104 | cv2.imshow('Color',color_image)
105 | cv2.imshow('Depth',depth_image)
106 |
107 | key = cv2.waitKey(0)
108 | # Press esc or 'q' to close the image window
109 | if key & 0xFF == ord('q') or key == 27:
110 | cv2.destroyAllWindows()
111 | print 'break'
112 | break
113 | elif key == 32:
114 | cv2.destroyAllWindows()
115 | continue
116 | finally:
117 |
118 | pipeline.stop()
119 | print 'stopped'
120 |
--------------------------------------------------------------------------------
/separate functions/phase 1/measure_search.py:
--------------------------------------------------------------------------------
# measure_search.py -- replay a recorded .bag and seek to a specific
# color/depth frame pair entered by the user, then open a matplotlib
# figure with a Ruler widget to measure distances on that frame.
# NOTE(review): indentation below was reconstructed from a whitespace-
# stripped dump -- verify nesting against the original file.
import pyrealsense2 as rs
# Import Numpy for easy array manipulation
import numpy as np
# Import OpenCV for easy image rendering
import cv2
import sys
from matplotlib import pyplot as plt
from matplotlib.widgets2 import Ruler
import os


num = raw_input('Wegenummer:\n')
file_name = './bag/'+num+'.bag'
pipeline = rs.pipeline()
config = rs.config()
config.enable_device_from_file(file_name)
config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgba8, 30)
profile = pipeline.start(config)
device = profile.get_device()
playback = device.as_playback()
# Non-real-time playback so no frames are dropped while searching.
playback.set_real_time(False)
align_to = rs.stream.color
align = rs.align(align_to)
frame_list = []

# Load the list of matched color/depth frame-number pairs.
with open('./list/'+num+'_matched.txt','r') as csvfile:
    for line in csvfile:
        frame = [elt.strip() for elt in line.split(',')]
        frame_list.append(frame)
i=0

search_color = raw_input('Color Frame Nummer')
search_depth = raw_input('Depth Frame Nummmer')

try:
    while True:
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)
        depth_frame = aligned_frames.get_depth_frame()
        vard = rs.frame.get_frame_number(depth_frame)
        # Outer loop: advance until the requested depth frame appears.
        if int(vard) == int(search_depth) :
            try:
                # Inner loop: advance until the matching color frame.
                while True:

                    new_frames = pipeline.wait_for_frames()
                    color_frame = new_frames.get_color_frame()
                    var = rs.frame.get_frame_number(color_frame)
                    if int(var) ==int(search_color):
                        depth_color = rs.colorizer().colorize(depth_frame)
                        depth_color_image = np.asanyarray(depth_color.get_data())
                        color_image = np.asanyarray(color_frame.get_data())

                        color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
                        color_cvt = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
                        print var, vard
                        # Overlay the depth colormap on the color image.
                        img_over = cv2.addWeighted(color_cvt, 1, depth_color_image, 0.3, 0.5)
                        key = cv2.waitKey(0)

                        xCoord = np.arange(0, 6, 1)
                        yCoord = [0, 1, -3, 5, -3, 0]
                        fig = plt.figure()
                        ax = fig.add_subplot(111)

                        number = 'frame number: c:' + str(var) + '/d:' + str(vard)
                        markerprops = dict(marker='o', markersize=5, markeredgecolor='red')
                        lineprops = dict(color='red', linewidth=2)
                        plt.imshow(img_over)
                        ax.grid(False)
                        plt.text(500, -20, number, fontsize=15)
                        # 'zoomed' is a Tk-backend-specific maximize call.
                        figManager = plt.get_current_fig_manager().window.state('zoomed')

                        # Ruler widget performs the actual measurements.
                        ruler = Ruler(ax=ax,
                                      depth_frame=depth_frame,
                                      color_intrin=color_intrin,
                                      useblit=True,
                                      markerprops=markerprops,
                                      lineprops=lineprops,
                                      )

                        #print 'show image'
                        plt.show()
                        # if pressed escape exit program
                        if key == 27:
                            cv2.destroyAllWindows()
                            break
                        else:
                            cv2.destroyAllWindows()
                            i+= 1
                            break

            finally:
                pass

        else:
            pass

except IndexError:
    print 'file ended'


finally:
    print 'finish'
104 |
105 |
106 |
--------------------------------------------------------------------------------
/separate functions/phase 1/measure_stream.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | import numpy as np
3 | from matplotlib import pyplot as plt
4 | import cv2
5 | import math
6 | from matplotlib import pyplot as plt
7 |
8 |
def draw_circle(event,x,y,flags,param):
    """OpenCV mouse callback: measure between mouse-down and mouse-up.

    Reads the module-level `depth_frame`, `color_intrin`, `images` and
    `mode` assigned by the streaming loop below. Both endpoints are
    deprojected to 3D and the Euclidean distance is printed (metres).
    NOTE(review): the draw thickness of 100 effectively floods the
    image -- probably meant to be a small value; confirm intent.
    """
    global ix,iy,drawing,mode

    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix,iy = x,y

    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        x0, y0 = x, y
        print 'ixiy=' + str(ix)+','+str(iy)
        print 'x0y0=' + str(x0) + ',' + str(y0)
        # Depth (metres) at the two endpoints.
        udist = depth_frame.get_distance(ix, iy)
        vdist = depth_frame.get_distance(x0, y0)
        print udist,vdist

        point1 = rs.rs2_deproject_pixel_to_point(color_intrin, [ix, iy], udist)
        point2 = rs.rs2_deproject_pixel_to_point(color_intrin, [x0, y0], vdist)
        print str(point1)+str(point2)
        dist = math.sqrt(
            math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1],2) + math.pow(
                point1[2] - point2[2], 2))
        print 'distance: '+ str(dist)
        if mode == True:
            cv2.line(images,(ix,iy),(x,y),(0,255,0),100)
        else:
            cv2.circle(images,(x,y),5,(0,0,255),100)
36 |
# Live measuring loop: stream depth+color, run the depth filter chain,
# overlay the filtered depth colormap on the color image, and let the
# user measure by click-dragging (handled by draw_circle above).
# NOTE(review): indentation reconstructed from a whitespace-stripped dump.
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 1280,720, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 1920,1080, rs.format.bgr8, 30)
# Start streaming (live camera, despite the original "from file" note)
profile = pipeline.start(config)

#Define filters
dec = rs.decimation_filter(1)
to_dasparity = rs.disparity_transform(True)
dasparity_to = rs.disparity_transform(False)
spat = rs.spatial_filter()
hole = rs.hole_filling_filter(2)
temp = rs.temporal_filter()

#sensor settings
dev = profile.get_device()
depth_sensor = dev.first_depth_sensor()
depth_sensor.set_option(rs.option.visual_preset, 4)
# NOTE(review): `range` shadows the builtin from here on (unused later).
range = depth_sensor.get_option_range(rs.option.visual_preset)
preset_name = depth_sensor.get_option_value_description(rs.option.visual_preset, 4)

#align to color map
align_to = rs.stream.color
align = rs.align(align_to)

try:
    while True:
        # Get frameset of depth
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)
        # `global` at module level is a no-op; kept byte-identical.
        global depth_frame
        depth_frame = aligned_frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        # Filter chain: decimate -> to disparity -> spatial -> temporal
        # -> hole-fill -> back to depth.
        depth = dec.process(depth_frame)
        depth_dis = to_dasparity.process(depth)
        depth_spat = spat.process(depth_dis)
        depth_temp = temp.process(depth_spat)
        depth_hole = hole.process(depth_temp)
        global depth_final
        depth_final = dasparity_to.process(depth_hole)
        depth_color = rs.colorizer().colorize(depth_final)

        #depth stream
        depth_stream = profile.get_stream(rs.stream.depth)
        inst = rs.video_stream_profile.intrinsics

        depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
        color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
        depth_to_color_extrin = depth_frame.profile.get_extrinsics_to(color_frame.profile)

        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_color.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        color_cvt = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
        img_over = cv2.addWeighted(color_cvt, 0.8, depth_image, 0.3, 0)
        images = color_image

        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(images, preset_name, (60, 80), font, 1, (255, 255, 255), 2, cv2.LINE_AA)

        drawing = False # true if mouse is pressed
        mode = True # if True, draw rectangle. Press 'm' to toggle to curve

        cv2.namedWindow('RealSense', cv2.WINDOW_FULLSCREEN)
        cv2.setMouseCallback('RealSense',draw_circle)
        cv2.imshow('RealSense', img_over)
        cv2.waitKey(1)
finally:
    pipeline.stop()
108 |
109 |
110 |
--------------------------------------------------------------------------------
/separate functions/phase 1/measure_synced.py:
--------------------------------------------------------------------------------
# measure_synced.py -- replay '3.bag' and, for every color/depth frame
# pair listed in matched.txt, show the overlay in a matplotlib figure
# with a Ruler widget for measuring.
# NOTE(review): indentation reconstructed from a whitespace-stripped dump.
# First import library
import pyrealsense2 as rs
# Import Numpy for easy array manipulation
import numpy as np
# Import OpenCV for easy image rendering
import cv2
import sys
from matplotlib import pyplot as plt
from matplotlib.widgets2 import Ruler

file_name = '3.bag'
print file_name
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_device_from_file(file_name)
config.enable_all_streams()
# Start streaming from file
profile = pipeline.start(config)
device = profile.get_device()
playback = device.as_playback()
# Non-real-time playback so no frames are dropped while matching.
playback.set_real_time(False)
# print 'profile got'
# align to color map
align_to = rs.stream.color
align = rs.align(align_to)
# print 'aligned'
frame_list = []

# matched.txt rows: [color_frame_number, depth_frame_number]
with open('matched.txt','r') as csvfile:
    for line in csvfile:
        frame = [elt.strip() for elt in line.split(',')]
        frame_list.append(frame)
#print frame_list
i=0
try:
    while True:
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)
        depth_frame = aligned_frames.get_depth_frame()
        vard = rs.frame.get_frame_number(depth_frame)

        #print 'vard' +str(vard)
        #print 'list'+ str(frame_list[i][0])

        # Outer loop: advance to the next listed depth frame; the final
        # frame_list[i] access past the end raises IndexError -> done.
        if int(vard) == int(frame_list[i][1]) :
            #print 'matched'
            try:
                while True:
                    new_frames = pipeline.wait_for_frames()
                    color_frame = new_frames.get_color_frame()
                    var = rs.frame.get_frame_number(color_frame)
                    #print var
                    if int(var) ==int(frame_list[i][0]):
                        depth_color = rs.colorizer().colorize(depth_frame)
                        depth_color_image = np.asanyarray(depth_color.get_data())
                        color_image = np.asanyarray(color_frame.get_data())

                        color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
                        color_cvt = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
                        print var, vard
                        img_over = cv2.addWeighted(color_cvt, 1, depth_color_image, 0.3, 0.5)
                        #cv2.namedWindow("Color Stream", cv2.WINDOW_FULLSCREEN)
                        #cv2.namedWindow("Depth Stream", cv2.WINDOW_AUTOSIZE)
                        #cv2.imshow("Color Stream", img_over)
                        #cv2.imshow("Depth Stream", depth_color_image)
                        key = cv2.waitKey(0)

                        xCoord = np.arange(0, 6, 1)
                        yCoord = [0, 1, -3, 5, -3, 0]
                        fig = plt.figure()
                        ax = fig.add_subplot(111)

                        number = 'frame number: c:' + str(var) + '/d:' + str(vard)
                        markerprops = dict(marker='o', markersize=5, markeredgecolor='red')
                        lineprops = dict(color='red', linewidth=2)
                        plt.imshow(img_over)
                        ax.grid(False)
                        plt.text(500, -20, number, fontsize=15)
                        # 'zoomed' is a Tk-backend-specific maximize call.
                        figManager = plt.get_current_fig_manager().window.state('zoomed')

                        ruler = Ruler(ax=ax,
                                      depth_frame=depth_frame,
                                      color_intrin=color_intrin,
                                      useblit=True,
                                      markerprops=markerprops,
                                      lineprops=lineprops,
                                      )

                        #print 'show image'
                        plt.show()
                        # if pressed escape exit program
                        if key == 27:
                            cv2.destroyAllWindows()
                            break
                        else:
                            cv2.destroyAllWindows()
                            i+= 1
                            break

            finally:
                pass

        else:
            pass

except IndexError:
    print 'file ended'


finally:
    print 'finish'
    pipeline.stop()
114 |
115 |
--------------------------------------------------------------------------------
/separate functions/phase 1/measure_with_mpl.py:
--------------------------------------------------------------------------------
# measure_with_mpl.py -- replay a fixed .bag file and measure distances
# on each frame with a matplotlib Ruler widget plus a Win32 message box.
# NOTE(review): indentation reconstructed from a whitespace-stripped dump.
#import library needed
import pyrealsense2 as rs
import numpy as np
import cv2
import math
import sys
from matplotlib import pyplot as plt
from matplotlib.widgets2 import Ruler

#message box
import ctypes
from ctypes import c_int, WINFUNCTYPE, windll
from ctypes.wintypes import HWND, LPCWSTR, UINT
prototype = WINFUNCTYPE(c_int, HWND, LPCWSTR, LPCWSTR, UINT)
paramflags = (1, "hwnd", 0), (1, "text", "Hi"), (1, "caption", "Result"), (1, "flags", 0)
MessageBox = prototype(("MessageBoxW", windll.user32), paramflags)

# NOTE(review): non-raw string with backslashes -- accepted on Python 2,
# but a SyntaxError ('\U' escape) on Python 3.
file_name = 'C:\Users\cyh\Documents\phase2\\bag\\3.bag'
print file_name
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_device_from_file(file_name)

# Start streaming from file
profile = pipeline.start(config)
profile.get_device().as_playback().set_real_time(True)
print 'profile got'
#align to color map
align_to = rs.stream.color
align = rs.align(align_to)
print 'aligned'
# Skip 5 first frames to give the Auto-Exposure time to adjust
for x in range(5):
    pipeline.wait_for_frames()

# Streaming loop
try:
    while True:
        #set up sensor
        dev = profile.get_device()
        depth_sensor = dev.first_depth_sensor()
        # NOTE(review): `range` shadows the builtin after the first pass.
        range = depth_sensor.get_option_range(rs.option.visual_preset)
        preset_name = depth_sensor.get_option_value_description(rs.option.visual_preset, 4)
        print 'sensor set'

        # Get frameset of depth
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)
        depth_frame = aligned_frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        #colorize depth frame
        depth_color = rs.colorizer().colorize(depth_frame)
        print 'frame colorized'

        # get depth stream data
        depth_stream = profile.get_stream(rs.stream.depth)
        inst = rs.video_stream_profile.intrinsics
        #get intrinsics of the frames
        depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
        color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
        depth_to_color_extrin = depth_frame.profile.get_extrinsics_to(color_frame.profile)
        print 'instrincts got'

        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_color.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        print 'got image'


        # Re-defined every iteration so the closures see this frame's data.
        def calculate_3D (xi, yi, x0, y0):
            """Measure the 3D distance (cm) between two clicked pixels
            and show it in a MessageBox."""
            udist = depth_frame.get_distance(xi, yi)
            vdist = depth_frame.get_distance(x0, y0)
            print udist, vdist
            point1 = rs.rs2_deproject_pixel_to_point(color_intrin, [xi, yi], udist)
            point2 = rs.rs2_deproject_pixel_to_point(color_intrin, [x0, y0], vdist)
            print 'start(x,y,z): '+ str(point1)+'\n' + 'end(x,y,z): ' +str(point2)
            dist = math.sqrt(
                math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1],2) + math.pow(
                    point1[2] - point2[2], 2))
            cm = dist * 100
            decimal2 = "%.2f" % cm
            print 'Vermessung: ' + str(decimal2)+ 'cm'
            MessageBox(text='Vermessung: ' + decimal2 + ' cm')

        def measure (event,x,y,flags,param):
            """Mouse callback: drag start/end pixels -> calculate_3D."""
            global xi,yi,x0,y0
            if event == cv2.EVENT_LBUTTONDOWN:
                xi,yi = x,y
                print 'xi,yi= ' + str(xi) + ',' + str(yi)
                return xi,yi

            elif event == cv2.EVENT_LBUTTONUP:
                x0,y0 = x,y
                print 'x0,y0= ' + str(x0) + ',' + str(y0)
                calculate_3D(xi, yi, x0, y0)
                return x0,y0

        print 'show window'
        xCoord = np.arange(0, 6, 1)
        yCoord = [0, 1, -3, 5, -3, 0]
        fig = plt.figure()
        ax = fig.add_subplot(111)
        markerprops = dict(marker='o', markersize=5, markeredgecolor='red')
        lineprops = dict(color='red', linewidth=2)
        plt.imshow(color_image)
        ax.grid(True)
        # NOTE(review): cv2.setMouseCallback targets an OpenCV window
        # name; on a matplotlib figure this has no effect -- measuring
        # is actually done by the Ruler widget below.
        cv2.setMouseCallback('Figure 1', measure)

        ruler = Ruler(ax=ax,
                      depth_frame=depth_frame,
                      color_intrin=color_intrin,
                      useblit=True,
                      markerprops=markerprops,
                      lineprops=lineprops,
                      )

        plt.show()


        key = cv2.waitKey(0)
        # Press esc or 'q' to close the image window
        if key & 0xFF == ord('q') or key == 27:
            cv2.destroyAllWindows()
            print 'break'
            break
finally:

    pipeline.stop()
    print 'stopped'
131 |
--------------------------------------------------------------------------------
/separate functions/phase 1/read_bag.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | # Import Numpy for easy array manipulation
3 | import numpy as np
4 | # Import OpenCV for easy image rendering
5 | import cv2
6 |
7 |
8 |
def read_bag(filename):
    """a simple function for reading the input file and show in opencv window
    the current issue is that pipeline.stop still freeze while set_real_time is False

    :param filename: path to a recorded RealSense .bag file
    """

    pipeline = rs.pipeline()
    config = rs.config()
    # Tell config that we will use a recorded device from file to be used by the pipeline through playback.
    config.enable_device_from_file(filename)
    # Configure the pipeline to stream the depth stream
    config.enable_all_streams()
    # Start streaming from file
    profile = pipeline.start(config)
    device = profile.get_device()
    playback = device.as_playback()
    # Non-real-time playback so every recorded frame is delivered.
    playback.set_real_time(False)
    try:
        # Streaming loop
        while True:
            # Get frameset of depth
            frames = pipeline.wait_for_frames()

            # Get depth frame
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()

            # Colorize depth frame to jet colormap
            depth_color_frame = rs.colorizer().colorize(depth_frame)

            # Convert depth_frame to numpy array to render image in opencv
            depth_color_image = np.asanyarray(depth_color_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            color_cvt = cv2.cvtColor(color_image,cv2.COLOR_BGR2RGB)

            # Render image in opencv window
            # Create opencv window to render image in
            cv2.namedWindow("Color Stream", cv2.WINDOW_AUTOSIZE)
            cv2.imshow("Color Stream",color_cvt)
            cv2.imshow("Depth Stream", depth_color_image)
            key = cv2.waitKey(100)
            # if pressed escape or q, exit program
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break

    finally:
        print 'end'
        pipeline.stop()
56 |
def main():
    """Prompt for a .bag path and replay it in OpenCV windows."""
    read_bag(raw_input('file full path: \n'))


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/separate functions/phase 1/record_loop.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | import numpy as np
3 | from matplotlib import pyplot as plt
4 | import cv2
5 | import time
6 | import sys
7 |
8 |
def main():
    # Entry point: hand control to the interactive ask-then-record loop.
    question()
11 |
12 |
def record():
    """Record a .bag session named after the current time (dd_mm_HH_MM).

    Recording starts paused; a single frameset is written on Space and
    on every 300th color frame, so the file holds periodic snapshots
    rather than continuous video. Esc aborts; the session auto-stops
    after 180 seconds.
    """
    global time_name
    time_name = time.strftime('%d_%m_%H_%M')
    file_name = str(time_name) + '.bag'
    print file_name
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgba8, 30)
    config.enable_record_to_file(file_name)
    profile = pipeline.start(config)
    # get device and recorder
    device = profile.get_device()
    recorder = device.as_recorder()
    # Start paused -- frames are only written while resumed below.
    rs.recorder.pause(recorder)
    depth_sensor = device.first_depth_sensor()
    depth_sensor.set_option(rs.option.visual_preset, 4)
    # NOTE(review): `range` shadows the builtin within this function.
    range = depth_sensor.get_option_range(rs.option.visual_preset)
    preset_name = depth_sensor.get_option_value_description(rs.option.visual_preset, 4)
    # print preset_name

    time_1 = time.time()
    try:
        while True:
            # Get frameset of depth
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            depth_color_frame = rs.colorizer().colorize(depth_frame)
            depth_image = np.asanyarray(depth_color_frame.get_data())
            depth_colormap_resize = cv2.resize(depth_image,(424,240))
            color_image = np.asanyarray(color_frame.get_data())
            color_cvt = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
            color_cvt_2 = cv2.resize(color_cvt, (424,318))
            # Preview window: color on top, depth colormap below.
            images = np.vstack((color_cvt_2, depth_colormap_resize))
            cv2.namedWindow('Color', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Color', images)
            var = rs.frame.get_frame_number(color_frame)
            key = cv2.waitKey(1)

            # Elapsed session time in seconds.
            time_2 = time.time()
            gap = time_2 - time_1

            if key == 27:
                # Esc: abort this session.
                print 'stopped'
                cv2.destroyAllWindows()
                break

            elif gap > 180:
                # Auto-stop after three minutes.
                print 'done'
                cv2.destroyAllWindows()
                break
            elif key == 32:
                # Space: manually write one frameset to the file.
                rs.recorder.resume(recorder)
                print 'start time: ' + str(gap)
                frames = pipeline.wait_for_frames()
                depth_frame = frames.get_depth_frame()
                color_frame = frames.get_color_frame()
                var = rs.frame.get_frame_number(color_frame)
                print 'frame number: ' + str(var)
                rs.recorder.pause(recorder)

            elif var % 300 < 1:
                # Every 300th color frame: write one frameset.
                rs.recorder.resume(recorder)
                print 'start time: ' + str(gap)
                frames = pipeline.wait_for_frames()
                depth_frame = frames.get_depth_frame()
                color_frame = frames.get_color_frame()
                var = rs.frame.get_frame_number(color_frame)
                print 'saved frame number: ' + str(var)
                rs.recorder.pause(recorder)
                # print 'pause time: ' + str(gap)

    finally:
        pipeline.stop()

    pass
93 |
94 |
def question():
    """Ask whether another file should be recorded, then record it.

    Typing 'no' terminates the program via sys.exit('fertig'); any other
    answer starts another recording session. Rewritten as a loop instead
    of the original tail self-recursion, which grew the call stack by one
    frame per recording and could eventually hit the recursion limit in
    a long session. Observable behavior is unchanged.
    """
    while True:
        x = raw_input('next file? \n')
        if str(x) == 'no':
            sys.exit('fertig')
        record()


if __name__ == '__main__':
    question()
106 |
--------------------------------------------------------------------------------
/separate functions/phase 1/save-translate.py:
--------------------------------------------------------------------------------
# save-translate.py -- capture ONE aligned frameset from the live
# camera, run the depth post-processing filter chain, and let the user
# measure distances by click-dragging in an OpenCV window.
# NOTE(review): indentation reconstructed from a whitespace-stripped dump.
import pyrealsense2 as rs
import numpy as np
from matplotlib import pyplot as plt
import cv2
import math
# NOTE(review): duplicate pyplot import; and stock matplotlib.widgets
# has no Ruler -- sibling scripts import it from matplotlib.widgets2.
from matplotlib import pyplot as plt
from matplotlib.widgets import Ruler

# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 1280,720, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 1280,720, rs.format.bgr8, 30)
# Start streaming (live camera, despite the original "from file" note)
profile = pipeline.start(config)

#Define filters
dec = rs.decimation_filter(1)
to_dasparity = rs.disparity_transform(True)
dasparity_to = rs.disparity_transform(False)
spat = rs.spatial_filter()
hole = rs.hole_filling_filter(2)
temp = rs.temporal_filter()

#sensor settings
dev = profile.get_device()
depth_sensor = dev.first_depth_sensor()
depth_sensor.set_option(rs.option.visual_preset, 4)
# NOTE(review): `range` shadows the builtin from here on (unused later).
range = depth_sensor.get_option_range(rs.option.visual_preset)
preset_name = depth_sensor.get_option_value_description(rs.option.visual_preset, 4)

#align to color map
align_to = rs.stream.color
align = rs.align(align_to)

# Get frameset of depth
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
# `global` at module level is a no-op; kept byte-identical.
global depth_frame
depth_frame = aligned_frames.get_depth_frame()
color_frame = frames.get_color_frame()
# Filter chain: decimate -> disparity -> spatial -> temporal -> hole-fill
depth = dec.process(depth_frame)
depth_dis = to_dasparity.process(depth)
depth_spat = spat.process(depth_dis)
depth_temp = temp.process(depth_spat)
depth_hole = hole.process(depth_temp)
global depth_final
depth_final = dasparity_to.process(depth_hole)
depth_color = rs.colorizer().colorize(depth_final)

#depth stream
depth_stream = profile.get_stream(rs.stream.depth)
inst = rs.video_stream_profile.intrinsics

depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
depth_to_color_extrin = depth_frame.profile.get_extrinsics_to(color_frame.profile)

# Convert images to numpy arrays
depth_image = np.asanyarray(depth_color.get_data())
color_image = np.asanyarray(color_frame.get_data())

# Apply colormap on depth image (image must be converted to 8-bit per pixel first)
#depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, None, 0.5, 0), cv2.COLORMAP_JET)

# Stack both images horizontally
images = color_image

font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(images, preset_name, (60, 80), font, 1, (255, 255, 255), 2, cv2.LINE_AA)


drawing = False # true if mouse is pressed
mode = True # if True, draw rectangle. Press 'm' to toggle to curve

def draw_circle(event,x,y,flags,param):
    """Mouse callback: measure between mouse-down and mouse-up pixels
    using the captured depth_frame/color_intrin above."""
    global ix,iy,drawing,mode

    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix,iy = x,y

    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        x0, y0 = x, y
        print 'ixiy=' + str(ix)+','+str(iy)
        print 'x0y0=' + str(x0) + ',' + str(y0)
        udist = depth_frame.get_distance(ix, iy)
        vdist = depth_frame.get_distance(x0, y0)
        print udist,vdist

        point1 = rs.rs2_deproject_pixel_to_point(color_intrin, [ix, iy], udist)
        point2 = rs.rs2_deproject_pixel_to_point(color_intrin, [x0, y0], vdist)
        print point1,point2
        dist = math.sqrt(
            math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1],2) + math.pow(
                point1[2] - point2[2], 2))
        print 'distance: '+ str(dist)
        if mode == True:
            cv2.line(images,(ix,iy),(x,y),(0,255,0),100)
        else:
            cv2.circle(images,(x,y),5,(0,0,255),100)


cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.setMouseCallback('RealSense',draw_circle)
cv2.imshow('RealSense', images)
cv2.waitKey()
109 |
110 |
111 |
112 |
--------------------------------------------------------------------------------
/separate functions/phase 1/to_frame_script.py:
--------------------------------------------------------------------------------
# to_frame_script.py -- replay '0327.bag', skip forward by a user-given
# number of frames, and display that single color frame.
# NOTE(review): indentation (loop extents) reconstructed from a
# whitespace-stripped dump -- verify against the original file.
# First import library
import pyrealsense2 as rs
# Import Numpy for easy array manipulation
import numpy as np
# Import OpenCV for easy image rendering
import cv2
# Import os.path for file path manipulation
import os.path


file_name = '0327.bag'
print file_name
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_device_from_file(file_name)

# Start streaming from file
profile = pipeline.start(config)
device = profile.get_device()
playback = device.as_playback()
playback.set_real_time(False)
print 'profile got'
#align to color map
align_to = rs.stream.color
align = rs.align(align_to)
print 'aligned'
# Skip 5 first frames to give the Auto-Exposure time to adjust
for x in range(5):
    pipeline.wait_for_frames()
i = 1

# NOTE(review): Python 2 input() evaluates the typed text (eval) --
# int(raw_input(...)) would be the safe form.
frame_number = input('no.? \n')


# Discard frame_number framesets, then grab the next one for display.
for x in range(frame_number):
    pipeline.wait_for_frames()
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
playback.pause()
# Colorize depth frame to jet colormap
depth_color_frame = rs.colorizer().colorize(depth_frame)
# Convert depth_frame to numpy array to render image in opencv
depth_color_image = np.asanyarray(depth_color_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
cv2.namedWindow("Color Stream", cv2.WINDOW_AUTOSIZE)
cv2.imshow('Color Stream', color_image)
cv2.waitKey(0)

# Restart playback so the pipeline can be stopped cleanly.
playback.resume()
pipeline.stop()
pipeline.start(config)
54 |
--------------------------------------------------------------------------------
/separate functions/photo_capture.py:
--------------------------------------------------------------------------------
1 | import pyrealsense2 as rs
2 | import numpy as np
3 | import cv2
4 | import serial
5 | import datetime
6 | import threading
7 | import time
8 | import os
9 | from math import sin, cos, sqrt, atan2, radians
10 |
def port_check():
    """Scan COM1..COM9 for a serial port that can be opened.

    Returns the name of the last port that opened successfully. If no
    port opened (e.g. the GPS receiver is held by another program),
    prints a hint, waits for Enter and hard-exits the process.
    """
    serialPort = serial.Serial()
    serialPort.baudrate = 4800
    serialPort.bytesize = serial.EIGHTBITS
    serialPort.parity = serial.PARITY_NONE
    serialPort.timeout = 2
    exist_port = None
    for x in range(1, 10):
        portnum = 'COM{}'.format(x)
        serialPort.port = 'COM{}'.format(x)
        try:
            # Probe by opening and closing; busy/absent ports raise.
            serialPort.open()
            serialPort.close()
            exist_port = portnum
        except serial.SerialException:
            pass
        finally:
            pass
    if exist_port:
        return exist_port
    else:
        print 'close other programs using gps'
        raw_input('press enter to leave')
        # Hard exit without cleanup -- without a port the tool cannot run.
        os._exit(0)
35 |
def emptykml():
    """Create ./kml/Foto.kml with a skeleton KML document if missing.

    NOTE(review): the KML markup inside the template string below was
    stripped by the dump this file came from -- restore the tags from
    the original file before relying on this function.
    """
    kml = os.path.exists('./kml/Foto.kml')
    if kml:
        return
    else:
        with open('./kml/Foto.kml' , 'w') as kml:
            text = """


Foto.kml
1




normal
#s_ylw-pushpin


highlight
#s_ylw-pushpin_hl




"""
            kml.write(text)
83 |
84 |
def dir_generate(dir_name):
    """Create directory `dir_name` (mode 0700) if it does not exist.

    Safe to call when the directory already exists (EEXIST is ignored,
    covering the check/create race); any other OSError is re-raised.

    :param dir_name: directory path; coerced with str() first
    """
    dir_name = str(dir_name)
    if not os.path.exists(dir_name):
        try:
            os.makedirs(dir_name, 0o700)
        except OSError as e:
            # Bug fix: `errno` was referenced but never imported in this
            # file, so any real makedirs failure raised NameError instead
            # of being handled. Import it locally to keep the fix
            # self-contained.
            import errno
            if e.errno != errno.EEXIST:
                raise
93 |
94 |
def write_kml(lon,lat):
    """
    To append a point in kml
    :param lon: longitude in decimal degrees
    :param lat: latitude in decimal degrees
    :return: None -- rewrites ./kml/Foto.kml in place
    """
    # Read the whole document, splice a placemark at line index 37
    # (position is hard-coded against the skeleton from emptykml()),
    # then write the file back out.
    # NOTE(review): the KML tags in the template below were stripped by
    # the dump this file came from -- restore from the original file.
    myfile = open('./kml/Foto.kml', 'r')
    doc=myfile.readlines()
    myfile.close()

    doc.insert(37,"""
P
#m_ylw-pushpin

{},{},0.0

""".format(lon,lat))

    f = open('./kml/Foto.kml', 'w')
    contents = "".join(doc)
    f.write(contents)
    f.close()
118 |
119 |
def gps_dis(location_1,location_2):
    '''
    Great-circle (haversine) distance between two GPS positions.

    Each location is a (longitude, latitude) tuple or list in decimal
    degrees. Returns the distance in metres.
    '''
    # Mean Earth radius in kilometres.
    earth_radius_km = 6373.0

    lat1 = radians(location_1[1])
    lon1 = radians(location_1[0])
    lat2 = radians(location_2[1])
    lon2 = radians(location_2[0])

    # Haversine term built from the half coordinate deltas.
    hav = sin((lat2 - lat1) / 2)**2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2)**2
    arc = 2 * atan2(sqrt(hav), sqrt(1 - hav))

    # Kilometres -> metres.
    distance = earth_radius_km * arc
    distance = distance*1000
    return distance
142 |
143 |
def min2decimal(in_data):
    '''Convert an NMEA ddmm.mmmm coordinate value to decimal degrees.'''
    value = float(in_data)
    # Leading digits are whole degrees, the remainder is minutes.
    degrees = int(value / 100)
    minutes = value - degrees * 100
    return degrees + (minutes / 60)
151 |
152 |
def GPS():
    '''
    GPS reader thread: open the NMEA serial port (4800 baud, 8 data
    bits, no parity) found by port_check(), then loop parsing $GPRMC /
    $GPGGA sentences into the module globals lon/lat/current_location
    and rewriting ./kml/live.kml with the latest fix.

    Runs until gps_on is set False (by the camera thread) or a serial
    error occurs; the port is always closed on exit.
    '''
    global camera_on, gps_on, current_location, lon, lat
    gps_on = False
    print('GPS start')
    serialPort = serial.Serial()
    serialPort.port = port_check()
    serialPort.baudrate = 4800
    serialPort.bytesize = serial.EIGHTBITS
    serialPort.parity = serial.PARITY_NONE
    serialPort.timeout = 2
    serialPort.open()
    print 'GPS opened successfully'
    gps_on = True
    # (0, 0) doubles as the "no fix yet" sentinel below.
    lon, lat = 0, 0
    try:
        while True:
            # One NMEA sentence per line, comma-separated fields.
            line = serialPort.readline()
            data = line.split(",")


            if data[0] == '$GPRMC':
                # RMC: field 2 is the status flag ('A' = valid fix);
                # fields 3/5 are latitude/longitude in ddmm.mmmm form.
                if data[2] == "A":
                    lat = min2decimal(data[3])
                    lon = min2decimal(data[5])

                else:
                    #print 'searching gps'
                    pass

            elif data[0] == '$GPGGA':
                # GGA: field 6 is fix quality ('1' = GPS fix);
                # fields 2/4 are latitude/longitude in ddmm.mmmm form.
                if data[6] == '1':
                    lon = min2decimal(data[4])
                    lat = min2decimal(data[2])
                else:
                    #print 'searching gps'
                    pass

            if lon ==0 or lat == 0:
                print 'gps not ready'

            else:
                current_location = (lon,lat)
                #print 'gps ready, current location:{}'.format(current_location)

                # Overwrite (not append) the live-position KML so Google
                # Earth can follow the current location.
                with open('./kml/live.kml', 'w') as pos:
                    googleearth_message = '''


Live Point
hi im here

{},{},0


'''.format(lon, lat)
                    pos.write(googleearth_message)

            # Camera thread clears gps_on to request shutdown.
            if gps_on is False:
                break
    except serial.SerialException:
        print 'Error opening GPS'
        gps_on = False
    finally:
        serialPort.close()
        print('GPS finish')
221 |
222 |
def Camera(file_name):
    """
    Record geotagged RealSense snapshots into ./bag/<file_name>.bag.

    Streams depth (1280x720 Z16) and color (1920x1080 RGB8) at 30 fps
    with recording paused; a snapshot is taken (recorder briefly
    resumed) whenever the GPS position has moved more than 15 m from
    the last photo, or on the 'b'/space key. Each snapshot's frame
    numbers, position and timestamp are appended to
    foto_log/<file_name>.txt and the point is added to Foto.kml.

    Keys: 'q'/Esc = quit (also stops the GPS thread), 'r' = restart
    with a new file, 'p' = toggle pause of distance-triggered capture.
    Communicates with the other threads via the camera_on /
    camera_repeat / gps_on globals.
    """
    print 'Camera start'
    global camera_on, camera_repeat, gps_on,i,gpsmsg,current_location, lon, lat
    camera_on = True
    camera_repeat = False
    # Last-photo and current positions start at the (0, 0) sentinel.
    foto_location = [0,0]
    current_location = [0,0]
    Pause = False
    take_pic = False
    # Running photo counter used in the log file.
    i = 1

    bag_name = './bag/{}.bag'.format(file_name)
    fotolog = open('foto_log/{}.txt'.format(file_name), 'w')

    # set configurations and start
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1920, 1080, rs.format.rgb8, 30)
    config.enable_record_to_file(bag_name)
    profile = pipeline.start(config)
    # get record device and pause it (frames are only written to the
    # bag while the recorder is resumed around a snapshot)
    device = profile.get_device()
    recorder = device.as_recorder()
    recorder.pause()
    # get sensor and set to high density (visual preset 4)
    depth_sensor = device.first_depth_sensor()
    depth_sensor.set_option(rs.option.visual_preset, 4)
    dev_range = depth_sensor.get_option_range(rs.option.visual_preset)
    preset_name = depth_sensor.get_option_value_description(rs.option.visual_preset, 4)
    print preset_name
    # set frame queue size to max
    sensor = profile.get_device().query_sensors()
    for x in sensor:
        x.set_option(rs.option.frames_queue_size, 32)
    # set auto exposure but process data first
    color_sensor = profile.get_device().query_sensors()[1]
    color_sensor.set_option(rs.option.auto_exposure_priority, True)

    try:
        while True:
            present = datetime.datetime.now()
            date = '{},{},{},{}'.format(present.day, present.month, present.year, present.time())
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            # waitKeyEx returns extended key codes (arrow/function keys).
            key = cv2.waitKeyEx(1)

            # 98 = 'b', 32 = space: manual shutter in either mode;
            # when not paused, moving >15 m also triggers a photo.
            if Pause is True:
                if key == 98 or key == 32:
                    take_pic = True
            elif Pause is False:
                if gps_dis(current_location, foto_location) > 15 or key == 98 or key == 32:
                    take_pic = True

            if take_pic == True:
                # Briefly resume recording so exactly one fresh frameset
                # lands in the bag, then log its frame numbers/position.
                recorder.resume()
                time.sleep(0.05)
                frames = pipeline.wait_for_frames()
                depth_frame = frames.get_depth_frame()
                color_frame = frames.get_color_frame()
                var = rs.frame.get_frame_number(color_frame)
                vard = rs.frame.get_frame_number(depth_frame)
                foto_location = (lon, lat)
                print 'photo taken at:{}'.format(foto_location)
                recorder.pause()
                logmsg = '{},{},{},{},{},{}\n'.format(i, str(var), str(vard), lon, lat, date)
                fotolog.write(logmsg)
                write_kml(lon, lat)
                i += 1
                take_pic = False
                continue

            # 'q' or Esc (27): stop camera AND signal the GPS thread to exit.
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                camera_on = False
                gps_on = False
                camera_repeat = False
                print('Camera finish\n')
                break
            # 114 = 'r' (2228224 presumably an extended key code for the
            # same action — TODO confirm): restart with a new bag file.
            elif key == 114 or key == 2228224:
                cv2.destroyAllWindows()
                camera_on = False
                camera_repeat = True
                print 'camera will restart'
                break
            # GPS thread died: shut down without restarting.
            elif gps_on is False:
                cv2.destroyAllWindows()
                camera_repeat = False
                break
            # 'p' (or extended code 2162688 — TODO confirm which key)
            # toggles the distance-triggered capture pause.
            elif cv2.waitKey(1) & 0xFF == ord('p') or key == 2162688:
                if Pause is False:
                    print 'pause pressed'
                    Pause = True
                elif Pause is True:
                    print 'restart'
                    Pause = False

            # Build the preview: colorized depth next to downscaled color.
            depth_color_frame = rs.colorizer().colorize(depth_frame)
            depth_image = np.asanyarray(depth_color_frame.get_data())
            depth_colormap_resize = cv2.resize(depth_image,(424,240))
            color_image = np.asanyarray(color_frame.get_data())
            # RealSense delivers RGB; OpenCV display expects BGR.
            color_cvt = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
            color_cvt_2 = cv2.resize(color_cvt, (320,240))
            images = np.hstack((color_cvt_2, depth_colormap_resize))

            # Overlay a red "Pause" banner on the preview while paused.
            if Pause is True:
                cv2.rectangle(images, (420, 40), (220, 160), (0, 0, 255), -1)
                font = cv2.FONT_HERSHEY_SIMPLEX
                bottomLeftCornerOfText = (220, 110)
                fontScale = 2
                fontColor = (0, 0, 0)
                lineType = 4
                cv2.putText(images, 'Pause', bottomLeftCornerOfText, font, fontScale, fontColor, lineType)

            cv2.namedWindow('Color', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Color', images)

    except NameError:
        # lon/lat not yet defined because GPS never produced a fix.
        os._exit(0)

    finally:
        fotolog.close()
        pipeline.stop()
349 |
350 |
def camera_loop():
    """
    Camera supervisor thread: wait for the GPS thread to come up, then
    run Camera() repeatedly, picking the first unused ./bag/MMDD_NNN.bag
    file name for each session.

    Loops until the GPS thread stops (gps_on False) or Camera() signals
    it should not be restarted (camera_repeat False, set on quit).
    """
    global camera_repeat, gps_on
    num = 1
    camera_repeat = True
    now = datetime.datetime.now()
    # Give the GPS thread a head start before polling its flag.
    time.sleep(1)

    # Sleep while waiting so this wait does not spin a CPU core.
    while gps_on is False:
        time.sleep(0.1)
    print('Starting Camera')

    while gps_on is True:
        file_name = '{:02d}{:02d}_{:03d}'.format(now.month, now.day, num)
        bag_name = './bag/{}.bag'.format(file_name)
        if os.path.isfile(bag_name):
            # Name already used by an earlier session: try the next number.
            num += 1
        elif camera_repeat is False:
            # Camera() asked not to be restarted (user quit).
            break
        else:
            print('current filename:{}'.format(file_name))
            Camera(file_name)
380 |
381 |
def main():
    """
    Entry point: create the working folders, seed Foto.kml, then run
    the GPS and camera threads to completion.
    """
    for folder in ('bag', 'foto_log', 'kml'):
        dir_generate(folder)

    emptykml()

    gps_thread = threading.Thread(target=GPS, name='T1')
    camera_thread = threading.Thread(target=camera_loop, name='T2')
    for worker in (gps_thread, camera_thread):
        worker.start()
    # Join in the original order: camera first, then GPS.
    for worker in (camera_thread, gps_thread):
        worker.join()
    print('all done\n')
396 |
397 |
# Script entry point: start the GPS + camera capture threads.
if __name__ == '__main__':
    main()
400 |
--------------------------------------------------------------------------------
/separate functions/single_frameset.py:
--------------------------------------------------------------------------------
import pyrealsense2 as rs
import numpy as np
import cv2

# Capture a single RealSense frameset snapshot after letting the camera
# warm up for 100 frames.

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 15)
config.enable_stream(rs.stream.color, 1920, 1080, rs.format.rgb8, 15)
profile = pipeline.start(config)

try:
    # Processing block that saves the frameset it receives to disk.
    filt = rs.save_single_frameset()

    # Discard warm-up frames so exposure/white balance can settle.
    for _ in range(100):
        pipeline.wait_for_frames()

    frame = pipeline.wait_for_frames()
    filt.process(frame)
finally:
    # Always release the camera, even if capture fails.
    pipeline.stop()
--------------------------------------------------------------------------------