├── geocoder
│   ├── requirements.txt
│   ├── search-locations.csv
│   ├── geocoded-output.csv
│   ├── readme.md
│   └── geoCoder.py
├── linkchecker
│   ├── requirements.txt
│   ├── sources.json
│   ├── readme.md
│   └── linkChecker.py
├── to_pascal
│   ├── requirements.txt
│   ├── README.md
│   ├── templates
│   │   └── annotation.xml
│   ├── pascal_voc_writer_fa.py
│   ├── rename_files.py
│   ├── config.py
│   └── supervisely_to_pascal_voc.py
├── video-pixel-profile
│   ├── .gitignore
│   ├── Temp_In
│   │   └── .gitignore
│   ├── Temp_Out
│   │   └── .gitignore
│   ├── requirements.txt
│   ├── README.md
│   ├── framerMidPointY.py
│   ├── framerAverage.py
│   ├── framer.py
│   └── framerMidPointX.py
├── split-textured-geo
│   ├── README.md
│   ├── tile-folder-50.sh
│   ├── docs
│   │   ├── split-geometry-by-nurbs-maya.md
│   │   └── split-geometry-in-maya.md
│   ├── all-imgs-skim-pixels.py
│   ├── all-imgs-resize.py
│   └── tile-folder-169.sh
├── .gitignore
├── README.md
└── capture-timemap
    └── capture_timemap.applescript
/geocoder/requirements.txt:
--------------------------------------------------------------------------------
1 | geopy
--------------------------------------------------------------------------------
/linkchecker/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
--------------------------------------------------------------------------------
/to_pascal/requirements.txt:
--------------------------------------------------------------------------------
1 | jinja2
2 |
--------------------------------------------------------------------------------
/video-pixel-profile/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 |
--------------------------------------------------------------------------------
/geocoder/search-locations.csv:
--------------------------------------------------------------------------------
1 | state,city
2 | California,Los Angeles
--------------------------------------------------------------------------------
/video-pixel-profile/Temp_In/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | */
3 | !.gitignore
4 |
--------------------------------------------------------------------------------
/video-pixel-profile/Temp_Out/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | */
3 | !.gitignore
4 |
--------------------------------------------------------------------------------
/video-pixel-profile/requirements.txt:
--------------------------------------------------------------------------------
1 | opencv-python==4.0.0.21
2 | numpy==1.16.2
3 |
--------------------------------------------------------------------------------
/split-textured-geo/README.md:
--------------------------------------------------------------------------------
1 | # split-textured-geo
2 |
3 | See docs/split-geometry-in-maya.md for instructions on how to tile a mesh in
4 | Maya (code included there).
5 |
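6 | The `tile-folder-*.sh` scripts tile exported texture images with ImageMagick's `convert`; a usage sketch (folder name hypothetical):
7 |
8 | ```
9 | ./tile-folder-50.sh textures/
10 | ```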
--------------------------------------------------------------------------------
/geocoder/geocoded-output.csv:
--------------------------------------------------------------------------------
1 | city,state,latitude,longitude,display_name
2 | Los Angeles,California,34.0536909,-118.242766,"Los Angeles, Los Angeles County, California, United States"
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .mayaSwatches/
3 | *.secret.py
4 | *.ipynb_checkpoints
5 |
6 | # Python files
7 | # ------------
8 | # Byte-compiled / optimized / DLL files
9 | __pycache__/*
10 | *.py[cod]
11 | geocoder-env
12 |
13 | # C extensions
14 | *.so
15 | # Installer logs
16 | pip-log.txt
17 | pip-delete-this-directory.txt
18 | tags
19 |
20 | results.csv
21 | scripts-env
--------------------------------------------------------------------------------
/split-textured-geo/tile-folder-50.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # usage: ./tile-folder-50.sh <folder>/
3 | FILE_EXT=jpg
4 |
5 | FOLDER_NAME=$1
6 |
7 | crop_and_tile () {
8 |   # skim 133 pixels from the top
9 |   convert "$1" -crop 8440x5499+0+133 "$1-int1.$FILE_EXT"
10 |   mkdir -p "422x423_img_$2"
11 |   # tile in 422x423 panels
12 |   convert "$1-int1.$FILE_EXT" +repage -crop 422x423 "422x423_img_$2/img_%d.$FILE_EXT"
13 |   # cleanup
14 |   rm "$1-int1.$FILE_EXT"
15 | }
16 |
17 | for filename in "$FOLDER_NAME"*
18 | do
19 |   FILE=$(echo "$filename" | cut -d'/' -f 2)
20 |   YEAR=$(echo "$FILE" | cut -d'_' -f 1)
21 |   crop_and_tile "$filename" "$YEAR"
22 | done
23 |
--------------------------------------------------------------------------------
/split-textured-geo/docs/split-geometry-by-nurbs-maya.md:
--------------------------------------------------------------------------------
1 | # Splitting a Mesh by NURBS grid (Maya)
2 |
3 | 1. Create a NURBS shape or grid. This will work as a 'cutter' for the geometry in question.
4 | 2. Rotate the NURBS so that it is parallel with the geometry, and switch to the top view ('from above').
5 | 3. Select the NURBS, and then shift-select the geometry. `Edit Mesh -> Project Curve on Mesh`
6 | 4. The curve projections will be selected by default. Shift-select the geometry as well. `Edit Mesh -> Split Mesh with Projected Curve`
7 | 5. Select the geometry in object mode. `Mesh -> Detach`, and then `Mesh -> Separate` (possibly in the other order).
--------------------------------------------------------------------------------
/split-textured-geo/all-imgs-skim-pixels.py:
--------------------------------------------------------------------------------
1 | from os import listdir, makedirs, system
2 |
3 | # years for which a folder of 422x423 tiles exists
4 | yrs = [1945, 1949, 1956, 1958, 1960, 1961, 1964, 1966, 1968, 1971, 1974, 1976, 1980, 1984, 1986, 1990, 1995, 1999, 2018]
5 |
6 | for yr in yrs:
7 |     dirname = './422x423_img_' + str(yr)
8 |     new_dirname = './422x422_img_' + str(yr)
9 |
10 |     paths = listdir(dirname)
11 |
12 |     makedirs(new_dirname)
13 |
14 |     # crop each tile to 422x422 by skimming one pixel from the top
15 |     for path in paths:
16 |         ncmd = "convert {0} -crop 422x422+0+1 {1}".format(dirname + '/' + path, new_dirname + '/' + path)
17 |         system(ncmd)
18 |
--------------------------------------------------------------------------------
/split-textured-geo/all-imgs-resize.py:
--------------------------------------------------------------------------------
1 | from os import listdir, makedirs, system
2 |
3 | # years for which a folder of 422x422 tiles exists
4 | yrs = [1945, 1949, 1956, 1958, 1960, 1961, 1964, 1966, 1968, 1971, 1974, 1976, 1980, 1984, 1986, 1990, 1995, 1999, 2018]
5 |
6 | for yr in yrs:
7 |     dirname = './422x422_img_' + str(yr)
8 |     new_dirname = './512x512_img_' + str(yr)
9 |
10 |     paths = listdir(dirname)
11 |
12 |     makedirs(new_dirname)
13 |
14 |     # resize each tile up to 512x512
15 |     for path in paths:
16 |         ncmd = "convert {0} -resize 512x512 {1}".format(dirname + '/' + path, new_dirname + '/' + path)
17 |         system(ncmd)
18 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Scripts
2 | A range of scripts developed at Forensic Architecture for processing images,
3 | video, sound, and 3D meshes.
4 |
5 | Each subfolder contains its own README which contains more detail about the
6 | script's use case, as well as instructions to install and run.
7 |
8 | | Folder | Description |
9 | | ------ | ----------- |
10 | | capture-timemap | AppleScript (Mac-only) to automate making a film of a narrative in [timemap](https://github.com/forensic-architecture/timemap) |
11 | | geocoder | geocode a CSV of cities and states into latitude/longitude via OpenStreetMap's Nominatim |
12 | | linkchecker | batch-verify a JSON list of source urls, flagging broken links |
13 | | split-textured-geo | split 3D mesh into tiles for the web (Maya) |
14 | | video-pixel-profile | synthesize a video into a single 'timeline' JPEG |
15 | | to_pascal | convert from supervisely format to Pascal VOC |
16 |
--------------------------------------------------------------------------------
/split-textured-geo/tile-folder-169.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # usage: ./tile-folder-169.sh <folder>/
3 | FILE_EXT=jpg
4 |
5 | FOLDER_NAME=$1
6 |
7 | crop_and_tile () {
8 |   # skim 55 pixels from the top
9 |   convert "$1" -crop 8448x5577+0+55 "$1-int1.$FILE_EXT"
10 |   # add 2 white pixels to the right
11 |   convert "$1-int1.$FILE_EXT" -background white -gravity northeast -splice 2x0 "$1-int2.$FILE_EXT"
12 |   mkdir -p "169x169_img_$2"
13 |   # tile in 169x169 panels
14 |   convert "$1-int2.$FILE_EXT" +repage -crop 169x169 "169x169_img_$2/img_%d.$FILE_EXT"
15 |   # cleanup
16 |   rm "$1-int1.$FILE_EXT" "$1-int2.$FILE_EXT"
17 | }
18 |
19 | for filename in "$FOLDER_NAME"*
20 | do
21 |   FILE=$(echo "$filename" | cut -d'/' -f 2)
22 |   YEAR=$(echo "$FILE" | cut -d'_' -f 1)
23 |   crop_and_tile "$filename" "$YEAR"
24 | done
25 |
--------------------------------------------------------------------------------
/to_pascal/README.md:
--------------------------------------------------------------------------------
1 | This repo includes scripts to convert a dataset labelled with supervise.ly into Pascal VOC format, as well as the label format used for training YOLO-based detectors.
2 |
3 | ## Installation
4 | `pip install pascal-voc-writer`
5 |
6 | ## Commands
7 | Assuming your dataset in supervise.ly format is in this repo, change the directories and other variables in `config.py`.
8 |
9 | If your dataset is not in this repo, make sure to change the paths in `config.py` to absolute paths, mainly `json_path_pattern` and `img_patterns`.
10 |
11 | Run `python rename_files.py` to clean image and annotation JSON file paths into more consistent names.
12 |
13 | Run `python supervisely_to_pascal_voc.py` to get the dataset in PASCAL VOC format.
14 |
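15 | Based on the folders the script creates, the output (named by `voc_folder_name` in `config.py`) is laid out roughly like this:
16 |
17 | ```
18 | tc_basic_voc/
19 | ├── Annotations/     # one Pascal VOC XML file per image
20 | ├── JPEGImages/      # renamed copies of the dataset images
21 | ├── labels/          # YOLO txt labels, multi-class ids
22 | ├── labels_1c/       # YOLO txt labels, single-class ids
23 | └── ImageSets/Main/  # list of image paths for training
24 | ```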
--------------------------------------------------------------------------------
/to_pascal/templates/annotation.xml:
--------------------------------------------------------------------------------
1 | <annotation>
2 |     <folder>{{ folder }}</folder>
3 |     <filename>{{ filename }}</filename>
4 |     <path>{{ path }}</path>
5 |     <source>
6 |         <database>{{ database }}</database>
7 |     </source>
8 |     <size>
9 |         <width>{{ width }}</width>
10 |         <height>{{ height }}</height>
11 |         <depth>{{ depth }}</depth>
12 |     </size>
13 |     <segmented>{{ segmented }}</segmented>
14 |     {% for object in objects %}<object>
15 |         <name>{{ object.name }}</name>
16 |         <pose>{{ object.pose }}</pose>
17 |         <truncated>{{ object.truncated }}</truncated>
18 |         <difficult>{{ object.difficult }}</difficult>
19 |         <bndbox>
20 |             <xmin>{{ object.xmin }}</xmin>
21 |             <ymin>{{ object.ymin }}</ymin>
22 |             <xmax>{{ object.xmax }}</xmax>
23 |             <ymax>{{ object.ymax }}</ymax>
24 |         </bndbox>
25 |     </object>{% endfor %}
26 | </annotation>
27 |
--------------------------------------------------------------------------------
/linkchecker/sources.json:
--------------------------------------------------------------------------------
1 | {
2 |   "A Good Example Source": {
3 | "paths": [
4 | "https://forensic-architecture.org/investigation/the-extrajudicial-execution-of-ahmad-erekat"
5 | ],
6 | "id": "An Example Source",
7 | "title": "An Example Source",
8 | "description": "An Example Source Description",
9 | "thumbnail": "https://content.forensic-architecture.org/wp-content/uploads/2021/02/Ahmad-Photo-768x432.jpg",
10 | "url": "https://forensic-architecture.org/investigation/the-extrajudicial-execution-of-ahmad-erekat",
11 | "type": ""
12 | },
13 |   "A BAD Example Source": {
14 | "paths": [
15 | "https://www.bbc.com/news/wor-asia-56309338"
16 | ],
17 | "id": "An Example Source",
18 | "title": "An Example Source",
19 | "description": "An Example Source Description",
20 | "thumbnail": "https://content.forensic-architecture.org/wp-content/uploads/2021/02/Ahm-Photo-768x432.jpg",
21 | "url": "https://www.bbc.com/news/wor-asia-56309338",
22 | "type": ""
23 | }
24 | }
--------------------------------------------------------------------------------
/geocoder/readme.md:
--------------------------------------------------------------------------------
1 | # Geo Coder
2 |
3 |
4 | A tool that automatically 'geocodes' a list of locations (states and cities), generating latitude and longitude for each location so you don't have to look them up manually.
5 |
6 |
7 | Uses OpenStreetMap's Nominatim geocoding service.
8 |
9 | ## Install
10 |
11 | ### Python and pip set up
12 |
13 | Install Python and pip3, then use pip to set up a local Python environment:
14 |
15 | ```
16 | pip install virtualenv
17 | virtualenv geocoder-env
18 | source geocoder-env/bin/activate
19 | ```
20 |
21 | Install dependencies:
22 |
23 | `pip install -r requirements.txt`
24 |
25 | ## Run it
26 |
27 | `python geoCoder.py search-locations.csv` (or the file of your choice)
28 |
29 | ## In
30 |
31 | A CSV with two columns:
32 |
33 | * state
34 | * city
35 |
36 | Replace the example row:
37 |
38 | `California,Los Angeles`
39 |
40 | with your own locations.
41 |
42 | ## Out
43 |
44 | `geocoded-output.csv`
45 |
46 | A CSV output with the following fields:
47 |
48 | * city - the city you passed in
49 | * state - the state you passed in
50 | * latitude - the latitude found
51 | * longitude - the longitude found
52 | * display_name - the location found so you can check against your input
53 |
54 | e.g.
55 |
56 | `Los Angeles,California,34.0536909,-118.242766,"Los Angeles, Los Angeles County, California, United States"`
57 |
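58 | Under the hood the script makes one rate-limited Nominatim query per row (see `geoCoder.py`); a minimal sketch of that call, using the same `myGeocoder` user agent as the script:
59 |
60 | ```
61 | from geopy.geocoders import Nominatim
62 | from geopy.extra.rate_limiter import RateLimiter
63 |
64 | locator = Nominatim(user_agent='myGeocoder')
65 | geocode = RateLimiter(locator.geocode, min_delay_seconds=1)
66 | result = geocode({'city': 'Los Angeles', 'state': 'California'})
67 | print(result.latitude, result.longitude)
68 | ```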
--------------------------------------------------------------------------------
/capture-timemap/capture_timemap.applescript:
--------------------------------------------------------------------------------
1 | (*
2 | * Script to take successive photos of a narrative in Timemap.
3 | * Set `frames` to the number of steps in the narrative.
4 |  * Change `frameRate` if you want something faster/slower.
5 | * Open Safari with the timemap, and manually add "id='clickmepls'" to the right arrow div in narrative mode.
6 | * Requires `ffmpeg` to compose the frames into a video
7 | *)
8 |
9 | set frames to 370
10 | set dFolder to "~/Desktop/narrativecapture/"
11 | set frameRate to 3
12 |
13 | do shell script ("mkdir -p " & dFolder)
14 |
15 | set i to 0
16 | delay 3 -- wait 3 seconds for the page to load.
17 | repeat frames times
18 | tell application "Safari"
19 | activate
20 | set winID to id of window 1
21 | end tell
22 | do shell script ("screencapture " & dFolder & "frame-" & i & ".png")
23 | tell application "Safari"
24 | -- NOTE: you need to manually add the id 'clickmepls' to the narrative arrow in Safari.
25 | do JavaScript "document.getElementById('clickmepls').click();" in current tab of first window
26 | end tell
27 | delay 1.5
28 | set i to i + 1
29 | end repeat
30 |
31 | do shell script ("ffmpeg -r " & frameRate & " -s 1920x1080 -start_number 0 -i frame-%d.png -vframes " & frames & " timemap_capture.mp4")
32 |
--------------------------------------------------------------------------------
/video-pixel-profile/README.md:
--------------------------------------------------------------------------------
1 | # Video Pixel Profile
2 |
3 | 
4 |
5 | These scripts index an MP4 video to produce an image that visually expresses
6 | what the video contains by sampling some pixels from each frame, and
7 | a 'timeline' of the video. The following heuristics are implemented:
8 |
9 | | Title | Description |
10 | | ----- | ----------- |
11 | | MidPointX | Sample a one-pixel-wide column in the center of each frame (the X midpoint), stacking the sample from each frame horizontally in the output JPG |
12 | | MidPointY | Sample a one-pixel-high row in the center of each frame (the Y midpoint), stacking the sample from each frame vertically in the output JPG |
13 |
14 |
15 | ### Running
16 | * Install the dependencies in your local version of python 3:
17 |
18 | `python3 -m pip install -r requirements.txt`
19 |
20 | * Copy the videos you want to process into the `Temp_In` folder.
21 | * In a terminal, run the following script:
22 |
23 | `python3 framerMidPointX.py`
24 |
25 | After the script has finished running, the resulting JPEGs can be found in
26 | 'Temp_Out'.
27 |
28 | #### Running on Windows
29 | Note that on Windows, the executable for the Python interpreter is usually
30 | downloaded as `py.exe`, and so you will need to use that to run commands
31 | instead of `python3`:
32 | `py.exe framerMidPointX.py`
33 |
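34 | The core of the MidPointX heuristic is only a few lines of OpenCV. A simplified sketch of what `framerMidPointX.py` does for each video (the input filename is hypothetical):
35 |
36 | ```
37 | import cv2
38 | import numpy as np
39 |
40 | cap = cv2.VideoCapture("Temp_In/example.mp4")
41 | frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
42 | width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
43 | height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
44 | profile = np.zeros((height, frames, 3), dtype=np.uint8)
45 | for i in range(frames):
46 |     ok, frame = cap.read()
47 |     if not ok:
48 |         break
49 |     profile[:, i] = frame[:, width // 2]  # middle column becomes output column i
50 | cv2.imwrite("Temp_Out/profile.jpg", profile)
51 | ```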
--------------------------------------------------------------------------------
/to_pascal/pascal_voc_writer_fa.py:
--------------------------------------------------------------------------------
1 | import os
2 | from jinja2 import Environment, PackageLoader
3 |
4 |
5 | class Writer:
6 | def __init__(self, path, width, height, depth=3, database='Unknown', segmented=0):
7 | environment = Environment(loader=PackageLoader('pascal_voc_writer', 'templates'), keep_trailing_newline=True)
8 | self.annotation_template = environment.get_template('annotation.xml')
9 |
10 | abspath = os.path.abspath(path)
11 |
12 | self.template_parameters = {
13 | 'path': path,
14 | 'filename': os.path.basename(abspath),
15 | 'folder': os.path.basename(os.path.dirname(abspath)),
16 | 'width': width,
17 | 'height': height,
18 | 'depth': depth,
19 | 'database': database,
20 | 'segmented': segmented,
21 | 'objects': []
22 | }
23 |
24 | def addObject(self, name, xmin, ymin, xmax, ymax, pose='Unspecified', truncated=0, difficult=0):
25 | self.template_parameters['objects'].append({
26 | 'name': name,
27 | 'xmin': xmin,
28 | 'ymin': ymin,
29 | 'xmax': xmax,
30 | 'ymax': ymax,
31 | 'pose': pose,
32 | 'truncated': truncated,
33 | 'difficult': difficult,
34 | })
35 |
36 | def save(self, annotation_path):
37 | with open(annotation_path, 'w') as file:
38 | content = self.annotation_template.render(**self.template_parameters)
39 | file.write(content)
40 |
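41 |
42 | if __name__ == "__main__":
43 |     # Minimal usage sketch with hypothetical values: one 1920x1080 image
44 |     # and a single bounding box, written out as a VOC annotation.
45 |     writer = Writer('example.jpg', 1920, 1080)
46 |     writer.addObject('canister', 10, 20, 110, 220)
47 |     writer.save('example.xml')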
--------------------------------------------------------------------------------
/linkchecker/readme.md:
--------------------------------------------------------------------------------
1 | # Link Checker
2 |
3 | This is a script that batch-verifies a list of source urls to see if they are broken. It works with the JSON format for timemap sources, so you should be able to copy the output from the datasheet-server sources tab.
4 |
5 | ## Install
6 |
7 | Clone this repo from GitHub.
8 |
9 | ## Python and Pip set up
10 |
11 | Install Python and pip3 then use Pip to set up your Python local environment:
12 |
13 | ```
14 | pip install virtualenv
15 | virtualenv scripts-env
16 | source scripts-env/bin/activate
17 | ```
18 |
19 | Install dependencies:
20 |
21 | `pip install -r requirements.txt`
22 |
23 | ## Run
24 |
25 | Copy the JSON-format sources from datasheet-server into `sources.json` in this directory.
26 |
27 | Run:
28 |
29 | `python linkChecker.py sources.json`
30 |
31 | Once it completes, it creates a file called `results.csv` in which you can check all the urls that have been verified. The results include both successes and errors, where:
32 |
33 | * `success` - the url is reachable. Empty path, url, or thumbnail fields also count as a success.
34 |
35 | * `404` - most likely the url is no longer there (the content has been moved), or the url is wrong and you need to check that it is correct.
36 |
37 | * other errors - these can occur because a server is no longer there, the request times out, or a multitude of other reasons.
38 |
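39 | A few example rows (urls and ids hypothetical):
40 |
41 | ```
42 | error,type,url,id
43 | success,url,https://example.com/ok,source-1
44 | 404,thumbnail,https://example.com/missing.jpg,source-2
45 | ```
46 |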
47 | ## Ignoring some urls
48 |
49 | Some urls may not be accessible in your geographic region but are accessible elsewhere. If you have urls like this, add them to the `ignoreList` array in `linkChecker.py`. Any ignored urls are still included in `results.csv`.
50 |
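51 | For example, with a hypothetical region-locked url:
52 |
53 | ```
54 | ignoreList = [
55 |     'https://example.com/region-locked-video'
56 | ]
57 | ```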
--------------------------------------------------------------------------------
/to_pascal/rename_files.py:
--------------------------------------------------------------------------------
1 | import glob, shutil
2 | import config as cfg
3 |
4 | def remove_last_occurence(mystr, removal, replacement):
5 | reverse_removal = removal[::-1]
6 | reverse_replacement = replacement[::-1]
7 | return mystr[::-1].replace(reverse_removal, reverse_replacement, 1)[::-1]
8 |
9 |
10 | def rename_files():
11 | for pattern in cfg.img_patterns:
12 | for file_path in glob.glob(pattern):
13 | print(file_path)
14 | new_path = file_path.replace('.', '_', file_path.count('.')-1)
15 | new_path = new_path.replace('_jpg', '')
16 | new_path = new_path.replace('_jpeg', '')
17 | new_path = new_path.replace('_png', '')
18 | new_path = new_path.replace('.jpeg', '.png')
19 | new_path = new_path.replace('.jpg', '.png')
20 | new_path = new_path.replace(' ', '_')
21 | shutil.move(file_path, new_path)
22 |
23 |
24 | for pattern in cfg.json_path_pattern:
25 | for file_path in glob.glob(pattern):
26 | print(file_path)
27 |             new_path = remove_last_occurence(file_path, '.jpeg', '')
28 |             new_path = remove_last_occurence(new_path, '.jpg', '')
29 |             new_path = remove_last_occurence(new_path, '.png', '')
30 |             new_path = new_path.replace('.', '_', new_path.count('.') - 1)
31 | new_path = new_path.replace('_jpg', '')
32 | new_path = new_path.replace('_jpeg', '')
33 | new_path = new_path.replace('_png', '')
34 | new_path = new_path.replace(' ', '_')
35 | shutil.move(file_path, new_path)
36 |
37 | if __name__ == "__main__":
38 | rename_files()
39 |
--------------------------------------------------------------------------------
/video-pixel-profile/framerMidPointY.py:
--------------------------------------------------------------------------------
1 | """
2 | Prints an image with a color profile of each frame,
3 | averaging the color of each row in the frame.
4 | Left to right, each pixel-column corresponds to the horizontal color average of a frame.
5 | """
6 |
7 | import cv2
8 | import numpy as np
9 | import math
10 | from pathlib import Path
11 |
12 | # Prompt the user for a movie file
13 | filename = input('Enter the movie file: ')
14 |
15 | # Use openCV to capture
16 | mov = Path(filename)
17 | if mov.is_file():
18 | cap = cv2.VideoCapture(filename)
19 | else:
20 | print('Sorry, I could not open that file.')
21 | exit()
22 |
23 | # Get some video properties
24 | frameTotal = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
25 | width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
26 | height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
27 | fps = cap.get(cv2.CAP_PROP_FPS)
28 |
29 | if not cap.isOpened():
30 |     print("I found the file, but was unable to open it.")
31 |     exit()
32 | print("Video opened. Number of frames: ", frameTotal, ", fps: ", fps)
33 |
34 | # Create a frameTotal x width x 3 array of 8 bit unsigned integers
35 | newImg = np.zeros( (frameTotal, width, 3), dtype=np.uint8 )
36 |
37 | # Iterate over each frame
38 | success = True
39 | frameCount = 0
40 | row = math.floor(height / 2)  # sample the vertical midpoint
41 | while frameCount < frameTotal and success:
42 |     success, image = cap.read()
43 |     if not success:
44 |         break
45 |     # transplant the middle row of the frame (color info is b, g, r) into the output
46 |     newImg[frameCount, :width] = image[row, :width]
47 |
48 |     print('Processing frame %d...' % frameCount)
49 |     frameCount += 1
50 |
51 | # Write out the profile image
52 | cv2.imwrite("profileMidPointY.jpg", newImg)
53 |
--------------------------------------------------------------------------------
/video-pixel-profile/framerAverage.py:
--------------------------------------------------------------------------------
1 | """
2 | Prints an image with a color profile of each frame,
3 | averaging the color of each row in the frame.
4 | Left to right, each pixel-column corresponds to the horizontal color average of a frame.
5 | """
6 |
7 | import cv2
8 | import numpy as np
9 | from pathlib import Path
10 |
11 | # Prompt the user for a movie file
12 | filename = input('Enter the movie file: ')
13 |
14 | # Use openCV to capture
15 | mov = Path(filename)
16 | if mov.is_file():
17 | cap = cv2.VideoCapture(filename)
18 | else:
19 | print('Sorry, I could not open that file.')
20 | exit()
21 |
22 | # Get some video properties
23 | frameTotal = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
24 | width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
25 | height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
26 | fps = cap.get(cv2.CAP_PROP_FPS)
27 |
28 | if not cap.isOpened():
29 |     print("I found the file, but was unable to open it.")
30 |     exit()
31 | print("Video opened. Number of frames: ", frameTotal, ", fps: ", fps)
32 |
33 | # Create a height x frameTotal x 3 array of 8 bit unsigned integers
34 | newImg = np.zeros( (height, frameTotal, 3), dtype=np.uint8 )
35 |
36 | # Iterate over each frame
37 | success = True
38 | frameCount = 0
39 | while frameCount < frameTotal and success:
40 |     success, image = cap.read()
41 |     if not success:
42 |         break
43 |     # average each row of the frame across its columns (color info is b, g, r)
44 |     newImg[:, frameCount] = image.mean(axis=1).astype(np.uint8)
45 |     print('Processing frame %d...' % frameCount)
46 |     frameCount += 1
47 |
48 | # Write out the profile image
49 | cv2.imwrite("profile_averageX.jpg", newImg)
50 |
--------------------------------------------------------------------------------
/video-pixel-profile/framer.py:
--------------------------------------------------------------------------------
1 | """
2 | Prints an image with a color profile of each frame,
3 | averaging the color of each row in the frame.
4 | Left to right, each pixel-column corresponds to the horizontal color average of a frame.
5 | """
6 |
7 | import cv2
8 | import numpy as np
9 | from pathlib import Path
10 |
11 | # Prompt the user for a movie file
12 | filename = input('Enter the movie file: ')
13 | profile_type = input('Enter the profile type: \n(1) Average X\n(2) Average Y\n(3) Sample X\n(4) Sample Y\n')
14 |
15 | # Use openCV to capture
16 | mov = Path(filename)
17 | if mov.is_file():
18 | cap = cv2.VideoCapture(filename)
19 | else:
20 | print('Sorry, I could not open that file.')
21 | exit()
22 |
23 | if cap.isOpened() is False:
24 | print("I found the file, but was unable to open it.")
25 | exit()
26 | else:
27 | print("Video opened. Number of frames: ", frameTotal, ", fps: ", fps)
28 |
29 | # Get some video properties
30 | frameTotal = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
31 | width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
32 | height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
33 | fps = cap.get(cv2.CAP_PROP_FPS)
34 |
35 | # Create a 1024x1024x3 array of 8 bit unsigned integers
36 | if profile_type == '1' or profile_type == '3':
37 | newImg = np.zeros( (1080, frameTotal, 3), dtype=np.uint8 )
38 | elif profile_type == '2' or profile_type == '4':
39 | newImg = np.zeros( (frameTotal, 1920 ,3), dtype=np.uint8 )
40 | else:
41 | print('That was not a reasonable option.')
42 | exit()
43 |
44 | # Iterate over each frame
45 | success = True
46 | frameCount = 0
47 | while frameCount < frameTotal and success:
48 | success,image = cap.read()
49 | row = 0
50 | col = 0
51 | # color info is b, g, r
52 | while row < height:
53 | blue = 0.0
54 | green = 0.0
55 | red = 0.0
56 | while col < width:
57 | blue += image[row, col][0]
58 | green += image[row, col][1]
59 | red += image[row, col][2]
60 | col += 1
61 | col = 0
62 | newImg.itemset((row, frameCount, 0), int(blue/width))
63 | newImg.itemset((row, frameCount, 1), int(green/width))
64 | newImg.itemset((row, frameCount, 2), int(red/width))
65 | row += 1
66 |
67 | print('Processing frame %d...' % frameCount)
68 | frameCount += 1
69 |
70 | # Print the thing
71 | cv2.imwrite("profile_averageX.jpg", newImg)
72 |
--------------------------------------------------------------------------------
/geocoder/geoCoder.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import sys
3 |
4 | from geopy.geocoders import Nominatim
5 | from geopy.extra.rate_limiter import RateLimiter
6 |
7 |
8 | def main():
9 | csvFile = ''
10 | for arg in sys.argv[1:]:
11 | csvFile = arg
12 | locations = getLocations(csvFile)
13 | locationsResult = []
14 |
15 |     # create the geocoder once, wrapped in a rate limiter to respect Nominatim's usage policy
16 |     locator = Nominatim(user_agent='myGeocoder')
17 |     geocode = RateLimiter(locator.geocode, min_delay_seconds=1)
18 |     for location in locations:
19 |         locationQuery = {'city': location['city'], 'state': location['state']}
20 |         locationGeocoded = geocode(locationQuery)
21 | location_city = location['city']
22 | location_state = location['state']
23 | if locationGeocoded:
24 | raw = locationGeocoded.raw
25 | print(raw)
26 | stripped = {'city': location_city,
27 | 'state': location_state,
28 | 'latitude': raw['lat'],
29 | 'longitude': raw['lon'],
30 | 'display_name': raw['display_name']
31 | }
32 | locationsResult.append(stripped)
33 | else:
34 | locationsResult.append({'city': location_city,
35 | 'state': location_state,
36 | 'latitude': 'NOT_FOUND',
37 | 'longitude': 'NOT_FOUND',
38 | 'display_name': 'NOT_FOUND'}
39 | )
40 | csvOutputFile = 'geocoded-output.csv'
41 | with open(csvOutputFile, 'w', newline='') as csvOutputFile:
42 | fieldnames = ['city', 'state', 'latitude', 'longitude', 'display_name']
43 | writer = csv.DictWriter(csvOutputFile, fieldnames)
44 | writer.writeheader()
45 | writer.writerows(locationsResult)
46 |
47 |
48 | def getLocations(file):
49 | locations = []
50 | with open(file, newline='') as file:
51 | reader = csv.DictReader(file)
52 | for row in reader:
53 | city = row['city']
54 | state = row['state']
55 | locations.append({'city': city, 'state': state})
56 | return locations
57 |
58 |
59 | if __name__ == "__main__":
60 | main()
61 |
--------------------------------------------------------------------------------
/video-pixel-profile/framerMidPointX.py:
--------------------------------------------------------------------------------
1 | """
2 | For each video in Temp_In, prints an image built from the one-pixel-wide
3 | column at the horizontal midpoint of each frame. Left to right, each
4 | pixel-column of the output corresponds to one frame.
5 | """
6 | import os
7 | import cv2
8 | import numpy as np
9 | import math
10 | from pathlib import Path
11 |
12 | in_folder = "Temp_In"
13 | out_folder = "Temp_Out"
14 | files = [f for f in os.listdir(in_folder) if os.path.isfile(os.path.join(in_folder, f))]
15 |
16 |
17 | def output_file(orig_filename):
18 |     return f"{out_folder}/profile_sampleX_{orig_filename}.jpg"
19 |
20 |
21 | for filename in files:
22 | # use openCV to capture
23 | mov = Path(os.path.join(in_folder, filename))
24 | if mov.is_file():
25 | cap = cv2.VideoCapture(os.path.join(in_folder, filename))
26 | print("Working on file %s" % filename)
27 | else:
28 | print("Sorry, I could not open that file.")
29 | exit()
30 |
31 | # get some video properties
32 | frameTotal = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
33 | width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
34 | height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
35 | fps = cap.get(cv2.CAP_PROP_FPS)
36 |
37 | if cap.isOpened() is False:
38 | print("I found the file, but was unable to open it.")
39 | continue
40 | else:
41 | print("Video opened. Number of frames: ", frameTotal, ", fps: ", fps)
42 |
43 | # create a {height}x{frameTotal}x3 tensor of 8 bit unsigned integers
44 | newImg = np.zeros((height, frameTotal, 3), dtype=np.uint8)
45 | success = True
46 | frameCount = 0
47 | col = math.floor(width / 2)
48 |
49 | print(f"filename: {filename}")
50 | print(f"frameTotal: {frameTotal}")
51 | print(f"width: {width}")
52 | print(f"height: {height}")
53 | print(f"fps: {fps}")
54 | print(f"sampling column: {col}")
55 |
56 | print("... ...processing... ...")
57 | while frameCount < frameTotal and success:
58 | row = 0
59 | success, image = cap.read()
60 |
61 | # transplant the center column of pixels into the newImage
62 | while row < height and success:
63 | blue = image[row, col][0]
64 | green = image[row, col][1]
65 | red = image[row, col][2]
66 | newImg.itemset((row, frameCount, 0), blue)
67 | newImg.itemset((row, frameCount, 1), green)
68 | newImg.itemset((row, frameCount, 2), red)
69 | row += 1
70 |
71 | frameCount += 1
72 |
73 | # Print the thing
74 | cv2.imwrite(output_file(filename), newImg)
75 | print(f"{output_file(filename)} created.")
76 | print("---------------------------------")
77 |
--------------------------------------------------------------------------------
/linkchecker/linkChecker.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import requests
3 | import sys
4 | import json
5 | import socket
6 |
7 |
8 | # add links to be ignored
9 | ignoreList = []
10 |
11 | # default timeout (seconds) for all socket operations
12 | timeout = 5
13 | socket.setdefaulttimeout(timeout)
14 |
15 | headers = {
16 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 YaBrowser/19.6.1.153 Yowser/2.5 Safari/537.36'}
17 |
18 | def main():
19 | fileLocation = ''
20 | for arg in sys.argv[1:]:
21 | fileLocation = arg
22 | urlsToVerify = getJSONFields(fileLocation)
23 | results = []
24 | for urlItem in urlsToVerify:
25 | results.append(getUrl(urlItem))
26 |
27 | csvOutputFile = 'results.csv'
28 | with open(csvOutputFile, 'w', newline='') as csvOutputFile:
29 | fieldnames = ['error', 'type', 'url', 'id']
30 | writer = csv.DictWriter(csvOutputFile, fieldnames)
31 | writer.writeheader()
32 | writer.writerows(results)
33 |
34 | def getUrl(item):
35 |     # verify a single url; returns one row for results.csv
36 |     id = item['id']
37 |     type = item['type']
38 |     url = item['url']
39 | if (url == ''):
40 | return {'error': 'success', 'type': type, 'url': 'empty-url', 'id': id}
41 | if (url in ignoreList):
42 | return {'error': 'success', 'type': type, 'url': url, 'id': 'ignore'}
43 |
44 | try:
45 | response = requests.get(item['url'], headers=headers, timeout=5)
46 | if (not response.ok):
47 | print(response.ok)
48 | print(id)
49 | print(url)
50 | return {'error': '404', 'type': type, 'url': url, 'id': id}
51 | except (requests.exceptions.Timeout):
52 | print('[timeout]', id)
53 | return {'error': 'timeout', 'type': type, 'url': url, 'id': id}
54 |
55 | except (requests.exceptions.ConnectionError):
56 | print('[connection]', id)
57 | return {'error': 'connection', 'type': type, 'url': url, 'id': id}
58 |
59 | except requests.exceptions.RequestException as e:
60 | print(e, id)
61 | print(e, url)
62 |
63 |         return {'error': str(e), 'type': type, 'url': url, 'id': id}
64 |
65 |     return {'error': 'success', 'type': type, 'url': url, 'id': id}
66 |
67 | def getJSONFields(file):
68 | results = []
69 | with open(file, newline='') as file:
70 | data = json.load(file)
71 |
72 | for object in data:
73 | jsonObject = data[object]
74 | id = jsonObject['id']
75 |
76 | url = jsonObject['url'].strip()
77 | results.append({'id': id, 'url': url, 'type': 'url'})
78 |
79 | thumbnail = jsonObject['thumbnail'].strip()
80 | results.append({'id': id, 'url': thumbnail, 'type': 'thumbnail'})
81 |
82 | paths = jsonObject['paths']
83 | for path in paths:
84 | pathUrl = path.strip()
85 | results.append({'id': id, 'url': pathUrl, 'type': 'path'})
86 | return results
87 |
88 |
89 | if __name__ == "__main__":
90 | main()
91 |
--------------------------------------------------------------------------------
/to_pascal/config.py:
--------------------------------------------------------------------------------
1 | # VOC format folder name where the new annotations will be written
2 | voc_folder_name = 'tc_basic_voc'
3 | original_dataset_name = 'tcs'
4 |
5 | # dataset name - different from voc_folder_name - used as prefix
6 | # for all files as well and to identify the dataset in other files
7 | dataset = 'tcs'
8 | # prefix when creating the numbered XML files with VOC format annotations
9 | prefix_im_name = dataset
10 |
11 | path_prefix = '/sata/datasets/dec8/tc_basic'
12 |
13 | # pattern to find the json files for the annotations
14 | # for each json, there must be a png file with the same name
15 | # but png extension, under imgs
16 | json_path_pattern = [f'{path_prefix}/{original_dataset_name}/*/*.json'] #'cansynth/2019*/*/*/*.json' #'not_can/*/*.json'
17 | # json_path_pattern = ['cansynth/2019*/*/*/*.json']
18 | # pattern for the image files
19 | img_patterns = [f'{path_prefix}/{original_dataset_name}/*/*.png', f'{path_prefix}/{original_dataset_name}/*/*.jpg', f'{path_prefix}/{original_dataset_name}/*/*.jpeg']
20 | # img_patterns = ['cansynth/2019*/*/*/*.png', 'cansynth/2019*/*/*/*.jpg', 'cansynth/2019*/*/*/*.jpeg']
21 |
22 |
23 |
24 | # original canister classes and their mapping to dataset ids
25 | classes_conversion = {'canister-general-bbox':0,
26 | 'triplecanister_top_bbox':0,
27 | 'canister': 0,
28 | 'triplechaser': 0,
29 | 'triple-bottom': 0,
30 | 'triple-top': 0,
31 | 'foambullet_bbox': 0,
32 | 'foambullet': 0,
33 | 'cylinder': 6,
34 | 'can': 10,
35 | 'bottle': 14,
36 | 'bin': 18}
37 |
38 | # mapping between the original canister classes to
39 | # the classes that will be used for training
40 | class_mapping = {'canister-general-bbox': 'canister',
41 | 'triplecanister_top_bbox': 'canister',
42 | 'triplechaser': 'canister',
43 | 'triple-bottom': 'canister',
44 | 'triple-top': 'canister',
45 | 'foambullet_bbox': 'canister',
46 | 'foambullet': 'canister',
47 | 'canister': 'canister',
48 | 'cylinder': 'cylinder',
49 | 'can': 'can',
50 | 'bottle': 'bottle',
51 | 'bin': 'bin',
52 | }
53 |
54 | # original canister classes and their mapping to dataset ids
55 | classes_conversion_1shot = {'canister-general-bbox':2,
56 | 'triplecanister_top_bbox':2,
57 | 'canister': 2,
58 | 'triplechaser': 2,
59 | 'triple-bottom': 2,
60 | 'triple-top': 2,
61 | 'foambullet_bbox': 2,
62 | 'foambullet': 2,
63 | 'cylinder': 6,
64 | 'can': 10,
65 | 'bottle': 14,
66 | 'bin': 18}
67 |
--------------------------------------------------------------------------------
/split-textured-geo/docs/split-geometry-in-maya.md:
--------------------------------------------------------------------------------
1 | # Tiling a mesh (DEM earth, v2)
2 |
3 | # Geometry
4 | 1. Import the pristine object into Maya. We want only the mesh, so remove everything else, then `Freeze Transformations` and delete the history.
5 | 2. Decide on a size for the tile. This needs to divide evenly into the number of faces along one side. For example, if the mesh is 1000 by 665 faces, I might decide I want 20 tiles along the 1000-face side: 1000/20 = 50, so each tile will be 50 faces wide.
6 | 3. In the case that you need a square tile, cut the necessary number of faces from the other side in order to make it evenly divisible. E.g. 665 -> 650, as 650/50 = 13.
7 | 4. Highlight and delete the excess faces from the top of the mesh. You can work out which is the top of the mesh by showing the texture, and eyeballing it with the image.
8 | 5. Save this file as **mesh-proportional.mb** (as a Maya binary). This mesh is now ready to be split into 260 50fx50f tiles (the mesh is 20x13 in tile terms).
9 | 6. We now separate the mesh into its tiles. Ensuring that the variables at the top of this script are appropriate, run:
10 | ```
11 | NO_OF_FACES = 50 # in each tile
12 | MESH_NAME = "polySurface1"
13 |
14 | from pymel.core import *
15 |
16 | # manually discovered edge-index offsets; these could maybe be inferred programmatically for a standardized mesh.
17 | def snd_hor(x):
18 | return (x-2)*3 + 4
19 | def snd_ver(x):
20 | return (x-1)*2001 + 1001
21 |
22 | FST_HOR = 2
23 | SND_HOR = snd_hor(NO_OF_FACES)
24 | FST_VER = 3
25 | SND_VER = snd_ver(NO_OF_FACES)
26 |
27 | polySelect(MESH_NAME, rpt=(FST_HOR,SND_HOR)) # across
28 | polySelect(MESH_NAME, rpt=(FST_VER,SND_VER), add=True) # up
29 | edgeIdxs = map(lambda e: e.index(), selected())
30 | for idx in edgeIdxs:
31 | polySelect(MESH_NAME, el=idx, add=True)
32 | ```
33 | * Edit Mesh -> Detach
34 | * Deselect, select mesh in object mode
35 | * Mesh -> Separate
36 | * Edit -> Delete By Type -> History
37 | * Remap the UVs of all the objects. Select all the objects, and run the following MEL script
38 | ```
39 | string $array[] = `ls -sl`;
40 | for ($item in $array) {
41 | /* NB: this command just taken from UV Editor -> Create -> Automatic */
42 | polyAutoProjection -lm 0 -pb 0 -ibd 1 -cm 0 -l 2 -sc 1 -o 1 -p 6 -ps 0.2 -ws 0 $item;
43 | };
44 | ```
45 | * MEL script to export selected as individual obj files, renaming the export file appropriately, and making sure it exists.
46 | ```
47 | global proc exportSelected()
48 | {
49 | string $mySelection[] = `ls -sl`;
50 |     for ($n = 0; $n < size($mySelection); $n++)
51 |     {
52 |         select $mySelection[$n];
53 |         /* NB: hypothetical export directory -- change it to a folder that exists */
54 |         string $path = ("tiles/" + $mySelection[$n] + ".obj");
55 |         file -force -options "groups=1;ptgroups=1;materials=1;smoothing=1;normals=1" -type "OBJexport" -es $path;
56 |     }
57 | }
58 | exportSelected();
59 | ```
--------------------------------------------------------------------------------
/to_pascal/supervisely_to_pascal_voc.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import json
3 | import os
4 | import shutil
5 |
6 | import config as cfg
7 | from pascal_voc_writer_fa import Writer
8 |
9 |
10 | def convert(size, box):
11 |     # box is [x_min, x_max, y_min, y_max]; returns a normalized YOLO (x, y, w, h)
12 |     dw = 1. / size[0]
13 |     dh = 1. / size[1]
14 |     x = (box[0] + box[1]) / 2.0
15 |     y = (box[2] + box[3]) / 2.0
16 |     w = box[1] - box[0]
17 |     h = box[3] - box[2]
18 |     return (x * dw, y * dh, w * dw, h * dh)
19 |
20 | def supervisely_to_pascal_voc():
21 |     """
22 | -> images
23 | --> image0.png
24 | --> image1.png
25 | --> imageN.png
26 | -> labels
27 | --> labels0.txt
28 | --> labels1.txt
29 | --> labelsN.txt
30 |
31 | In other words labels folder should be located next to the image folder in the same directory named "labels".
32 | """
33 | if not os.path.exists(cfg.voc_folder_name):
34 | os.mkdir(cfg.voc_folder_name)
35 |
36 | for subfolder in ['Annotations', 'JPEGImages', 'labels', 'labels_1c', 'ImageSets', 'ImageSets/Main']:
37 | if not os.path.exists(os.path.join(cfg.voc_folder_name, subfolder)):
38 | os.mkdir(os.path.join(cfg.voc_folder_name, subfolder))
39 |
40 | image_index = 0
41 | image_set = open(os.path.join(cfg.voc_folder_name, 'ImageSets', 'Main', cfg.dataset), 'w')
42 | # list_file = open('{}.txt'.format(cfg.dataset), 'w')
43 | for json_path_pattern in cfg.json_path_pattern:
44 | for file in glob.glob(json_path_pattern):
45 | prefix = '/' if file.startswith('/') else ''
46 | with open(file) as json_file:
47 | data = json.load(json_file)
48 | image_path = file.split('/')
49 | image_path[-1] = image_path[-1].split('.')[0] + '.jpg'
50 | image_path[-2] = 'img'
51 | image_path = os.path.join(*image_path)
52 | image_path = f'{prefix}{image_path}'
53 | image_name = cfg.prefix_im_name + '_' + str(image_index).zfill(5)
54 | new_image_path = f'{cfg.voc_folder_name}/JPEGImages/{image_name}.jpg'
55 | image_set.write(new_image_path+'\n')
56 | # list_file.write(new_image_path+'\n')
57 | image_index += 1
58 |             try:
59 |                 shutil.copy(image_path, new_image_path)
60 |             except OSError:
61 |                 print('error', image_path, new_image_path)
62 |
63 | label_txt = open('{}/labels/{}.txt'.format(cfg.voc_folder_name, image_name), 'w')
64 | label_1shot = open('{}/labels_1c/{}.txt'.format(cfg.voc_folder_name, image_name), 'w')
65 | w = data["size"]["width"]
66 | h = data["size"]["height"]
67 | writer = Writer(new_image_path, w, h)
68 | for detBox in data['objects']:
69 | if ('bitmap' not in detBox or ('bitmap' in detBox and not detBox['bitmap'])) \
70 | and detBox['classTitle'] in cfg.class_mapping:
71 | classname = detBox['classTitle']
72 | p1, p2 = detBox['points']['exterior']
73 | x1, y1 = p1
74 | x2, y2 = p2
75 | x_min = min(x1, x2)
76 | x_max = max(x1, x2)
77 | y_min = min(y1, y2)
78 | y_max = max(y1, y2)
79 | writer.addObject(cfg.class_mapping[classname], x_min, y_min, x_max, y_max)
80 | bb = convert((int(w), int(h)), [float(a) for a in [x_min, x_max, y_min, y_max]])
81 | label_txt.write(str(cfg.classes_conversion[classname]) + " " + " ".join([str(a) for a in bb]) + '\n')
82 | label_1shot.write(str(cfg.classes_conversion_1shot[classname]) + " " + " ".join([str(a) for a in bb]) + '\n')
83 |
84 | writer.save('{}/Annotations/{}.xml'.format(cfg.voc_folder_name, image_name))
85 | label_txt.close()
86 | label_1shot.close()
87 |
88 | # list_file.close()
89 | image_set.close()
90 |
91 | if __name__ == "__main__":
92 | supervisely_to_pascal_voc()
93 |
94 |
--------------------------------------------------------------------------------