├── python_utils ├── __init__.py ├── callbacks.py ├── preprocessing.py └── utils.py ├── PSPNet-Keras-tensorflow ├── __init__.py ├── python_utils │ ├── __init__.py │ ├── callbacks.py │ ├── preprocessing.py │ └── utils.py ├── data ├── weights │ ├── keras │ │ ├── Put keras weights here │ │ └── .DS_Store │ ├── npy │ │ └── Put the npy weights here │ ├── caffe │ │ └── Put caffemodels & prototxts here │ └── .DS_Store ├── drawImage │ ├── __init__.py │ └── drawModule.py ├── caffe-tensorflow │ ├── examples │ │ ├── imagenet │ │ │ ├── models │ │ │ │ ├── __init__.py │ │ │ │ ├── alexnet.py │ │ │ │ ├── caffenet.py │ │ │ │ ├── nin.py │ │ │ │ ├── vgg.py │ │ │ │ └── helper.py │ │ │ ├── README.md │ │ │ ├── classify.py │ │ │ └── validate.py │ │ └── mnist │ │ │ ├── README.md │ │ │ └── finetune_mnist.py │ ├── kaffe │ │ ├── caffe │ │ │ ├── __init__.py │ │ │ └── resolver.py │ │ ├── tensorflow │ │ │ └── __init__.py │ │ ├── __init__.py │ │ ├── errors.py │ │ └── shapes.py │ ├── .style.yapf │ ├── .gitignore │ ├── .pylintrc │ ├── LICENSE.md │ ├── convert.py │ └── README.md ├── requirements.txt ├── .DS_Store ├── .gitignore ├── img_test.jpg ├── img_test2_seg.jpg ├── img_test2_probs.jpg ├── utils │ ├── color150 │ │ ├── bag.jpg │ │ ├── ball.jpg │ │ ├── bar.jpg │ │ ├── base.jpg │ │ ├── bed.jpg │ │ ├── bike.jpg │ │ ├── boat.jpg │ │ ├── book.jpg │ │ ├── box.jpg │ │ ├── bus.jpg │ │ ├── car.jpg │ │ ├── case.jpg │ │ ├── desk.jpg │ │ ├── door.jpg │ │ ├── fan.jpg │ │ ├── flag.jpg │ │ ├── food.jpg │ │ ├── hill.jpg │ │ ├── hood.jpg │ │ ├── lake.jpg │ │ ├── lamp.jpg │ │ ├── land.jpg │ │ ├── oven.jpg │ │ ├── palm.jpg │ │ ├── path.jpg │ │ ├── pier.jpg │ │ ├── pole.jpg │ │ ├── pot.jpg │ │ ├── road.jpg │ │ ├── rock.jpg │ │ ├── rug.jpg │ │ ├── sand.jpg │ │ ├── sea.jpg │ │ ├── seat.jpg │ │ ├── ship.jpg │ │ ├── sink.jpg │ │ ├── sky.jpg │ │ ├── sofa.jpg │ │ ├── step.jpg │ │ ├── tank.jpg │ │ ├── tent.jpg │ │ ├── tray.jpg │ │ ├── tree.jpg │ │ ├── van.jpg │ │ ├── vase.jpg │ │ ├── wall.jpg │ │ ├── animal.jpg │ │ ├── ashcan.jpg │ │ ├── awning.jpg │ │ ├── barrel.jpg │ │ ├── basket.jpg │ │ ├── bench.jpg │ │ ├── blind.jpg │ │ ├── booth.jpg │ │ ├── bottle.jpg │ │ ├── bridge.jpg │ │ ├── buffet.jpg │ │ ├── canopy.jpg │ │ ├── chair.jpg │ │ ├── clock.jpg │ │ ├── column.jpg │ │ ├── cradle.jpg │ │ ├── earth.jpg │ │ ├── fence.jpg │ │ ├── field.jpg │ │ ├── floor.jpg │ │ ├── flower.jpg │ │ ├── glass.jpg │ │ ├── grass.jpg │ │ ├── house.jpg │ │ ├── hovel.jpg │ │ ├── light.jpg │ │ ├── mirror.jpg │ │ ├── person.jpg │ │ ├── pillow.jpg │ │ ├── plant.jpg │ │ ├── plate.jpg │ │ ├── poster.jpg │ │ ├── river.jpg │ │ ├── runway.jpg │ │ ├── sconce.jpg │ │ ├── screen.jpg │ │ ├── shelf.jpg │ │ ├── shower.jpg │ │ ├── stage.jpg │ │ ├── stairs.jpg │ │ ├── stool.jpg │ │ ├── stove.jpg │ │ ├── table.jpg │ │ ├── toilet.jpg │ │ ├── towel.jpg │ │ ├── tower.jpg │ │ ├── truck.jpg │ │ ├── washer.jpg │ │ ├── water.jpg │ │ ├── airplane.jpg │ │ ├── apparel.jpg │ │ ├── armchair.jpg │ │ ├── bannister.jpg │ │ ├── bathtub.jpg │ │ ├── bicycle.jpg │ │ ├── blanket.jpg │ │ ├── bookcase.jpg │ │ ├── building.jpg │ │ ├── cabinet.jpg │ │ ├── ceiling.jpg │ │ ├── computer.jpg │ │ ├── counter.jpg │ │ ├── curtain.jpg │ │ ├── cushion.jpg │ │ ├── escalator.jpg │ │ ├── fireplace.jpg │ │ ├── fountain.jpg │ │ ├── furniture.jpg │ │ ├── houseware.jpg │ │ ├── microwave.jpg │ │ ├── minibike.jpg │ │ ├── monitor.jpg │ │ ├── mountain.jpg │ │ ├── ottoman.jpg │ │ ├── painting.jpg │ │ ├── plaything.jpg │ │ ├── radiator.jpg │ │ ├── railing.jpg │ │ ├── sculpture.jpg │ │ ├── sidewalk.jpg │ │ ├── signboard.jpg │ │ ├── 
stairway.jpg │ │ ├── wardrobe.jpg │ │ ├── waterfall.jpg │ │ ├── chandelier.jpg │ │ ├── countertop.jpg │ │ ├── crt screen.jpg │ │ ├── dirt track.jpg │ │ ├── dishwasher.jpg │ │ ├── grandstand.jpg │ │ ├── pool table.jpg │ │ ├── screen door.jpg │ │ ├── skyscraper.jpg │ │ ├── streetlight.jpg │ │ ├── trade name.jpg │ │ ├── windowpane.jpg │ │ ├── arcade machine.jpg │ │ ├── bulletin board.jpg │ │ ├── coffee table.jpg │ │ ├── conveyer belt.jpg │ │ ├── kitchen island.jpg │ │ ├── refrigerator.jpg │ │ ├── swimming pool.jpg │ │ ├── swivel chair.jpg │ │ ├── traffic light.jpg │ │ ├── chest of drawers.jpg │ │ └── television receiver.jpg │ └── colorization │ │ ├── color150.mat │ │ └── objectName150.mat ├── example_images │ ├── ade20k.jpg │ ├── pascal_voc.jpg │ └── test_pycaffe.jpg ├── img_test2_seg_blended.jpg ├── example_results │ ├── ade20k_seg.jpg │ ├── ade20k_probs.jpg │ ├── cityscapes_seg.jpg │ ├── pascal_voc_seg.jpg │ ├── ade20k_seg_blended.jpg │ ├── ade20k_seg_pycaffe.jpg │ ├── cityscapes_probs.jpg │ ├── pascal_voc_probs.jpg │ ├── cityscapes_seg_blended.jpg │ └── pascal_voc_seg_blended.jpg ├── LICENSE ├── weight_converter.py ├── pascal_voc_labels.py ├── README.md └── train.py ├── CycleGAN-tensorflow ├── requirements.txt ├── .DS_Store ├── .gitignore ├── imgs │ ├── teaser.jpg │ ├── n02381460_510.jpg │ ├── n02381460_4530.jpg │ ├── n02381460_4660.jpg │ ├── n02381460_8980.jpg │ ├── n02391049_1760.jpg │ ├── n02391049_3070.jpg │ ├── n02391049_5100.jpg │ ├── n02391049_7150.jpg │ ├── AtoB_n02381460_510.jpg │ ├── AtoB_n02381460_4530.jpg │ ├── AtoB_n02381460_4660.jpg │ ├── AtoB_n02381460_8980.jpg │ ├── BtoA_n02391049_1760.jpg │ ├── BtoA_n02391049_3070.jpg │ ├── BtoA_n02391049_5100.jpg │ └── BtoA_n02391049_7150.jpg ├── resize.py ├── download_dataset.sh ├── ops.py ├── main.py └── utilsc.py ├── process_all ├── .DS_Store ├── images │ ├── e1700.jpg │ ├── e1701.jpg │ ├── e1702.jpg │ ├── e1703.jpg │ ├── e1704.jpg │ ├── img_test.jpg │ ├── e1700_seg_read.png │ ├── e1701_seg_read.png │ ├── e1702_seg_read.png │ ├── e1703_seg_read.png │ ├── e1704_seg_read.png │ ├── img_test_read.png │ ├── img_test_seg_read.png │ └── test_mask_voc_seg_read.jpg ├── params.py ├── test_align.py ├── process_image.py └── align.py ├── utils ├── color150 │ ├── bag.jpg │ ├── bar.jpg │ ├── bed.jpg │ ├── box.jpg │ ├── bus.jpg │ ├── car.jpg │ ├── fan.jpg │ ├── pot.jpg │ ├── rug.jpg │ ├── sea.jpg │ ├── sky.jpg │ ├── van.jpg │ ├── ball.jpg │ ├── base.jpg │ ├── bench.jpg │ ├── bike.jpg │ ├── blind.jpg │ ├── boat.jpg │ ├── book.jpg │ ├── booth.jpg │ ├── case.jpg │ ├── chair.jpg │ ├── clock.jpg │ ├── desk.jpg │ ├── door.jpg │ ├── earth.jpg │ ├── fence.jpg │ ├── field.jpg │ ├── flag.jpg │ ├── floor.jpg │ ├── food.jpg │ ├── glass.jpg │ ├── grass.jpg │ ├── hill.jpg │ ├── hood.jpg │ ├── house.jpg │ ├── hovel.jpg │ ├── lake.jpg │ ├── lamp.jpg │ ├── land.jpg │ ├── light.jpg │ ├── oven.jpg │ ├── palm.jpg │ ├── path.jpg │ ├── pier.jpg │ ├── plant.jpg │ ├── plate.jpg │ ├── pole.jpg │ ├── river.jpg │ ├── road.jpg │ ├── rock.jpg │ ├── sand.jpg │ ├── seat.jpg │ ├── shelf.jpg │ ├── ship.jpg │ ├── sink.jpg │ ├── sofa.jpg │ ├── stage.jpg │ ├── step.jpg │ ├── stool.jpg │ ├── stove.jpg │ ├── table.jpg │ ├── tank.jpg │ ├── tent.jpg │ ├── towel.jpg │ ├── tower.jpg │ ├── tray.jpg │ ├── tree.jpg │ ├── truck.jpg │ ├── vase.jpg │ ├── wall.jpg │ ├── water.jpg │ ├── airplane.jpg │ ├── animal.jpg │ ├── apparel.jpg │ ├── armchair.jpg │ ├── ashcan.jpg │ ├── awning.jpg │ ├── barrel.jpg │ ├── basket.jpg │ ├── bathtub.jpg │ ├── bicycle.jpg │ ├── blanket.jpg │ ├── bookcase.jpg │ ├── 
bottle.jpg
│   ├── bridge.jpg
│   ├── buffet.jpg
│   ├── building.jpg
│   ├── cabinet.jpg
│   ├── canopy.jpg
│   ├── ceiling.jpg
│   ├── column.jpg
│   ├── computer.jpg
│   ├── counter.jpg
│   ├── cradle.jpg
│   ├── curtain.jpg
│   ├── cushion.jpg
│   ├── flower.jpg
│   ├── fountain.jpg
│   ├── minibike.jpg
│   ├── mirror.jpg
│   ├── monitor.jpg
│   ├── mountain.jpg
│   ├── ottoman.jpg
│   ├── painting.jpg
│   ├── person.jpg
│   ├── pillow.jpg
│   ├── poster.jpg
│   ├── radiator.jpg
│   ├── railing.jpg
│   ├── runway.jpg
│   ├── sconce.jpg
│   ├── screen.jpg
│   ├── shower.jpg
│   ├── sidewalk.jpg
│   ├── stairs.jpg
│   ├── stairway.jpg
│   ├── toilet.jpg
│   ├── wardrobe.jpg
│   ├── washer.jpg
│   ├── bannister.jpg
│   ├── chandelier.jpg
│   ├── countertop.jpg
│   ├── crt screen.jpg
│   ├── dirt track.jpg
│   ├── dishwasher.jpg
│   ├── escalator.jpg
│   ├── fireplace.jpg
│   ├── furniture.jpg
│   ├── grandstand.jpg
│   ├── houseware.jpg
│   ├── microwave.jpg
│   ├── plaything.jpg
│   ├── pool table.jpg
│   ├── sculpture.jpg
│   ├── signboard.jpg
│   ├── skyscraper.jpg
│   ├── trade name.jpg
│   ├── waterfall.jpg
│   ├── windowpane.jpg
│   ├── coffee table.jpg
│   ├── conveyer belt.jpg
│   ├── refrigerator.jpg
│   ├── screen door.jpg
│   ├── streetlight.jpg
│   ├── swimming pool.jpg
│   ├── swivel chair.jpg
│   ├── traffic light.jpg
│   ├── arcade machine.jpg
│   ├── bulletin board.jpg
│   ├── kitchen island.jpg
│   ├── chest of drawers.jpg
│   └── television receiver.jpg
└── colorization
│   ├── color150.mat
│   └── objectName150.mat
├── params.py
├── README.md
├── weight_converter.py
├── .gitignore
├── ops.p
├── ops.py
├── pascal_voc_labels.py
├── tbot.py
├── train.py
├── process_image.py
└── utilsc.py
/python_utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/PSPNet-Keras-tensorflow/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/PSPNet-Keras-tensorflow/python_utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/PSPNet-Keras-tensorflow/data:
--------------------------------------------------------------------------------
1 | /home/avatar/datasets/
--------------------------------------------------------------------------------
/PSPNet-Keras-tensorflow/weights/keras/Put keras weights here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/PSPNet-Keras-tensorflow/weights/npy/Put the npy weights here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/PSPNet-Keras-tensorflow/drawImage/__init__.py:
--------------------------------------------------------------------------------
1 | from drawModule import *
--------------------------------------------------------------------------------
/PSPNet-Keras-tensorflow/weights/caffe/Put caffemodels & prototxts here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/CycleGAN-tensorflow/requirements.txt:
--------------------------------------------------------------------------------
1 | tensorflow-gpu
2 | numpy
3 | scipy
4 | pillow
5 | imageio
6 |
-------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/examples/imagenet/models/__init__.py: -------------------------------------------------------------------------------- 1 | from helper import * 2 | -------------------------------------------------------------------------------- /process_all/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/.DS_Store -------------------------------------------------------------------------------- /utils/color150/bag.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/bag.jpg -------------------------------------------------------------------------------- /utils/color150/bar.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/bar.jpg -------------------------------------------------------------------------------- /utils/color150/bed.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/bed.jpg -------------------------------------------------------------------------------- /utils/color150/box.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/box.jpg -------------------------------------------------------------------------------- /utils/color150/bus.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/bus.jpg -------------------------------------------------------------------------------- /utils/color150/car.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/car.jpg -------------------------------------------------------------------------------- /utils/color150/fan.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/fan.jpg -------------------------------------------------------------------------------- /utils/color150/pot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/pot.jpg -------------------------------------------------------------------------------- /utils/color150/rug.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/rug.jpg -------------------------------------------------------------------------------- /utils/color150/sea.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/sea.jpg -------------------------------------------------------------------------------- /utils/color150/sky.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/sky.jpg 
--------------------------------------------------------------------------------
/utils/color150/van.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/van.jpg
--------------------------------------------------------------------------------
/PSPNet-Keras-tensorflow/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | scipy
3 | tensorflow
4 | tensorflow-gpu
5 | keras
6 |
--------------------------------------------------------------------------------
/utils/color150/ball.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/ball.jpg
--------------------------------------------------------------------------------
/utils/color150/base.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/base.jpg
--------------------------------------------------------------------------------
/utils/color150/bench.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/bench.jpg
--------------------------------------------------------------------------------
/utils/color150/bike.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/bike.jpg
--------------------------------------------------------------------------------
/utils/color150/blind.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/blind.jpg
--------------------------------------------------------------------------------
/utils/color150/boat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/boat.jpg
--------------------------------------------------------------------------------
/utils/color150/book.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/book.jpg
--------------------------------------------------------------------------------
/utils/color150/booth.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/booth.jpg
--------------------------------------------------------------------------------
/utils/color150/case.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/case.jpg
--------------------------------------------------------------------------------
/utils/color150/chair.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/chair.jpg
--------------------------------------------------------------------------------
/utils/color150/clock.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/clock.jpg -------------------------------------------------------------------------------- /utils/color150/desk.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/desk.jpg -------------------------------------------------------------------------------- /utils/color150/door.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/door.jpg -------------------------------------------------------------------------------- /utils/color150/earth.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/earth.jpg -------------------------------------------------------------------------------- /utils/color150/fence.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/fence.jpg -------------------------------------------------------------------------------- /utils/color150/field.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/field.jpg -------------------------------------------------------------------------------- /utils/color150/flag.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/flag.jpg -------------------------------------------------------------------------------- /utils/color150/floor.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/floor.jpg -------------------------------------------------------------------------------- /utils/color150/food.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/food.jpg -------------------------------------------------------------------------------- /utils/color150/glass.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/glass.jpg -------------------------------------------------------------------------------- /utils/color150/grass.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/grass.jpg -------------------------------------------------------------------------------- /utils/color150/hill.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/hill.jpg -------------------------------------------------------------------------------- /utils/color150/hood.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/hood.jpg -------------------------------------------------------------------------------- /utils/color150/house.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/house.jpg -------------------------------------------------------------------------------- /utils/color150/hovel.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/hovel.jpg -------------------------------------------------------------------------------- /utils/color150/lake.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/lake.jpg -------------------------------------------------------------------------------- /utils/color150/lamp.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/lamp.jpg -------------------------------------------------------------------------------- /utils/color150/land.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/land.jpg -------------------------------------------------------------------------------- /utils/color150/light.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/light.jpg -------------------------------------------------------------------------------- /utils/color150/oven.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/oven.jpg -------------------------------------------------------------------------------- /utils/color150/palm.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/palm.jpg -------------------------------------------------------------------------------- /utils/color150/path.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/path.jpg -------------------------------------------------------------------------------- /utils/color150/pier.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/pier.jpg -------------------------------------------------------------------------------- /utils/color150/plant.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/plant.jpg -------------------------------------------------------------------------------- /utils/color150/plate.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/plate.jpg -------------------------------------------------------------------------------- /utils/color150/pole.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/pole.jpg 
-------------------------------------------------------------------------------- /utils/color150/river.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/river.jpg -------------------------------------------------------------------------------- /utils/color150/road.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/road.jpg -------------------------------------------------------------------------------- /utils/color150/rock.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/rock.jpg -------------------------------------------------------------------------------- /utils/color150/sand.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/sand.jpg -------------------------------------------------------------------------------- /utils/color150/seat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/seat.jpg -------------------------------------------------------------------------------- /utils/color150/shelf.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/shelf.jpg -------------------------------------------------------------------------------- /utils/color150/ship.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/ship.jpg -------------------------------------------------------------------------------- /utils/color150/sink.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/sink.jpg -------------------------------------------------------------------------------- /utils/color150/sofa.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/sofa.jpg -------------------------------------------------------------------------------- /utils/color150/stage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/stage.jpg -------------------------------------------------------------------------------- /utils/color150/step.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/step.jpg -------------------------------------------------------------------------------- /utils/color150/stool.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/stool.jpg -------------------------------------------------------------------------------- /utils/color150/stove.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/stove.jpg -------------------------------------------------------------------------------- /utils/color150/table.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/table.jpg -------------------------------------------------------------------------------- /utils/color150/tank.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/tank.jpg -------------------------------------------------------------------------------- /utils/color150/tent.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/tent.jpg -------------------------------------------------------------------------------- /utils/color150/towel.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/towel.jpg -------------------------------------------------------------------------------- /utils/color150/tower.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/tower.jpg -------------------------------------------------------------------------------- /utils/color150/tray.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/tray.jpg -------------------------------------------------------------------------------- /utils/color150/tree.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/tree.jpg -------------------------------------------------------------------------------- /utils/color150/truck.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/truck.jpg -------------------------------------------------------------------------------- /utils/color150/vase.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/vase.jpg -------------------------------------------------------------------------------- /utils/color150/wall.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/wall.jpg -------------------------------------------------------------------------------- /utils/color150/water.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/water.jpg -------------------------------------------------------------------------------- /utils/color150/airplane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/airplane.jpg -------------------------------------------------------------------------------- /utils/color150/animal.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/animal.jpg -------------------------------------------------------------------------------- /utils/color150/apparel.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/apparel.jpg -------------------------------------------------------------------------------- /utils/color150/armchair.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/armchair.jpg -------------------------------------------------------------------------------- /utils/color150/ashcan.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/ashcan.jpg -------------------------------------------------------------------------------- /utils/color150/awning.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/awning.jpg -------------------------------------------------------------------------------- /utils/color150/barrel.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/barrel.jpg -------------------------------------------------------------------------------- /utils/color150/basket.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/basket.jpg -------------------------------------------------------------------------------- /utils/color150/bathtub.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/bathtub.jpg -------------------------------------------------------------------------------- /utils/color150/bicycle.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/bicycle.jpg -------------------------------------------------------------------------------- /utils/color150/blanket.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/blanket.jpg -------------------------------------------------------------------------------- /utils/color150/bookcase.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/bookcase.jpg -------------------------------------------------------------------------------- /utils/color150/bottle.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/bottle.jpg -------------------------------------------------------------------------------- /utils/color150/bridge.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/bridge.jpg 
-------------------------------------------------------------------------------- /utils/color150/buffet.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/buffet.jpg -------------------------------------------------------------------------------- /utils/color150/building.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/building.jpg -------------------------------------------------------------------------------- /utils/color150/cabinet.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/cabinet.jpg -------------------------------------------------------------------------------- /utils/color150/canopy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/canopy.jpg -------------------------------------------------------------------------------- /utils/color150/ceiling.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/ceiling.jpg -------------------------------------------------------------------------------- /utils/color150/column.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/column.jpg -------------------------------------------------------------------------------- /utils/color150/computer.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/computer.jpg -------------------------------------------------------------------------------- /utils/color150/counter.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/counter.jpg -------------------------------------------------------------------------------- /utils/color150/cradle.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/cradle.jpg -------------------------------------------------------------------------------- /utils/color150/curtain.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/curtain.jpg -------------------------------------------------------------------------------- /utils/color150/cushion.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/cushion.jpg -------------------------------------------------------------------------------- /utils/color150/flower.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/flower.jpg -------------------------------------------------------------------------------- /utils/color150/fountain.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/fountain.jpg -------------------------------------------------------------------------------- /utils/color150/minibike.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/minibike.jpg -------------------------------------------------------------------------------- /utils/color150/mirror.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/mirror.jpg -------------------------------------------------------------------------------- /utils/color150/monitor.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/monitor.jpg -------------------------------------------------------------------------------- /utils/color150/mountain.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/mountain.jpg -------------------------------------------------------------------------------- /utils/color150/ottoman.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/ottoman.jpg -------------------------------------------------------------------------------- /utils/color150/painting.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/painting.jpg -------------------------------------------------------------------------------- /utils/color150/person.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/person.jpg -------------------------------------------------------------------------------- /utils/color150/pillow.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/pillow.jpg -------------------------------------------------------------------------------- /utils/color150/poster.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/poster.jpg -------------------------------------------------------------------------------- /utils/color150/radiator.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/radiator.jpg -------------------------------------------------------------------------------- /utils/color150/railing.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/railing.jpg -------------------------------------------------------------------------------- /utils/color150/runway.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/runway.jpg 
-------------------------------------------------------------------------------- /utils/color150/sconce.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/sconce.jpg -------------------------------------------------------------------------------- /utils/color150/screen.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/screen.jpg -------------------------------------------------------------------------------- /utils/color150/shower.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/shower.jpg -------------------------------------------------------------------------------- /utils/color150/sidewalk.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/sidewalk.jpg -------------------------------------------------------------------------------- /utils/color150/stairs.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/stairs.jpg -------------------------------------------------------------------------------- /utils/color150/stairway.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/stairway.jpg -------------------------------------------------------------------------------- /utils/color150/toilet.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/toilet.jpg -------------------------------------------------------------------------------- /utils/color150/wardrobe.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/wardrobe.jpg -------------------------------------------------------------------------------- /utils/color150/washer.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/washer.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/.DS_Store -------------------------------------------------------------------------------- /process_all/images/e1700.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/images/e1700.jpg -------------------------------------------------------------------------------- /process_all/images/e1701.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/images/e1701.jpg -------------------------------------------------------------------------------- /process_all/images/e1702.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/images/e1702.jpg -------------------------------------------------------------------------------- /process_all/images/e1703.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/images/e1703.jpg -------------------------------------------------------------------------------- /process_all/images/e1704.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/images/e1704.jpg -------------------------------------------------------------------------------- /utils/color150/bannister.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/bannister.jpg -------------------------------------------------------------------------------- /utils/color150/chandelier.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/chandelier.jpg -------------------------------------------------------------------------------- /utils/color150/countertop.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/countertop.jpg -------------------------------------------------------------------------------- /utils/color150/crt screen.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/crt screen.jpg -------------------------------------------------------------------------------- /utils/color150/dirt track.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/dirt track.jpg -------------------------------------------------------------------------------- /utils/color150/dishwasher.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/dishwasher.jpg -------------------------------------------------------------------------------- /utils/color150/escalator.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/escalator.jpg -------------------------------------------------------------------------------- /utils/color150/fireplace.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/fireplace.jpg -------------------------------------------------------------------------------- /utils/color150/furniture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/furniture.jpg -------------------------------------------------------------------------------- /utils/color150/grandstand.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/grandstand.jpg
--------------------------------------------------------------------------------
/utils/color150/houseware.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/houseware.jpg
--------------------------------------------------------------------------------
/utils/color150/microwave.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/microwave.jpg
--------------------------------------------------------------------------------
/utils/color150/plaything.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/plaything.jpg
--------------------------------------------------------------------------------
/utils/color150/pool table.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/pool table.jpg
--------------------------------------------------------------------------------
/utils/color150/sculpture.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/sculpture.jpg
--------------------------------------------------------------------------------
/utils/color150/signboard.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/signboard.jpg
--------------------------------------------------------------------------------
/utils/color150/skyscraper.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/skyscraper.jpg
--------------------------------------------------------------------------------
/utils/color150/trade name.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/trade name.jpg
--------------------------------------------------------------------------------
/utils/color150/waterfall.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/waterfall.jpg
--------------------------------------------------------------------------------
/utils/color150/windowpane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/windowpane.jpg
--------------------------------------------------------------------------------
/CycleGAN-tensorflow/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | .idea/*
3 | logs/*
4 | checkpoint/*
5 | datasets/*
6 | test/*
7 | sample/*
--------------------------------------------------------------------------------
/PSPNet-Keras-tensorflow/caffe-tensorflow/kaffe/caffe/__init__.py:
--------------------------------------------------------------------------------
1 | from .resolver import get_caffe_resolver, has_pycaffe
2 |
--------------------------------------------------------------------------------
/process_all/images/img_test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/images/img_test.jpg
--------------------------------------------------------------------------------
/utils/color150/coffee table.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/coffee table.jpg
--------------------------------------------------------------------------------
/utils/color150/conveyer belt.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/conveyer belt.jpg
--------------------------------------------------------------------------------
/utils/color150/refrigerator.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/refrigerator.jpg
--------------------------------------------------------------------------------
/utils/color150/screen door.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/screen door.jpg
--------------------------------------------------------------------------------
/utils/color150/streetlight.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/streetlight.jpg
--------------------------------------------------------------------------------
/utils/color150/swimming pool.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/swimming pool.jpg
--------------------------------------------------------------------------------
/utils/color150/swivel chair.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/swivel chair.jpg
--------------------------------------------------------------------------------
/utils/color150/traffic light.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/traffic light.jpg
--------------------------------------------------------------------------------
/utils/colorization/color150.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/colorization/color150.mat
--------------------------------------------------------------------------------
/PSPNet-Keras-tensorflow/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/.DS_Store
--------------------------------------------------------------------------------
/PSPNet-Keras-tensorflow/.gitignore:
--------------------------------------------------------------------------------
1 | *.npy
2 | *.pyc
3 | *.png
4 | *.caffemodel
5 | *.prototxt
6 | *.h5
7 | *.json
8 | output/
9 |
-------------------------------------------------------------------------------- /utils/color150/arcade machine.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/arcade machine.jpg -------------------------------------------------------------------------------- /utils/color150/bulletin board.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/bulletin board.jpg -------------------------------------------------------------------------------- /utils/color150/kitchen island.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/kitchen island.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/teaser.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/teaser.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/img_test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/img_test.jpg -------------------------------------------------------------------------------- /process_all/images/e1700_seg_read.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/images/e1700_seg_read.png -------------------------------------------------------------------------------- /process_all/images/e1701_seg_read.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/images/e1701_seg_read.png -------------------------------------------------------------------------------- /process_all/images/e1702_seg_read.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/images/e1702_seg_read.png -------------------------------------------------------------------------------- /process_all/images/e1703_seg_read.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/images/e1703_seg_read.png -------------------------------------------------------------------------------- /process_all/images/e1704_seg_read.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/images/e1704_seg_read.png -------------------------------------------------------------------------------- /process_all/images/img_test_read.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/images/img_test_read.png -------------------------------------------------------------------------------- /utils/color150/chest of drawers.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/chest of drawers.jpg -------------------------------------------------------------------------------- /utils/colorization/objectName150.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/colorization/objectName150.mat -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/.style.yapf: -------------------------------------------------------------------------------- 1 | [style] 2 | based_on_style = chromium 3 | column_limit = 100 4 | indent_width = 4 5 | -------------------------------------------------------------------------------- /utils/color150/television receiver.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/utils/color150/television receiver.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/n02381460_510.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/n02381460_510.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/img_test2_seg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/img_test2_seg.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/weights/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/weights/.DS_Store -------------------------------------------------------------------------------- /process_all/images/img_test_seg_read.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/images/img_test_seg_read.png -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/n02381460_4530.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/n02381460_4530.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/n02381460_4660.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/n02381460_4660.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/n02381460_8980.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/n02381460_8980.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/n02391049_1760.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/n02391049_1760.jpg 
-------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/n02391049_3070.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/n02391049_3070.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/n02391049_5100.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/n02391049_5100.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/n02391049_7150.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/n02391049_7150.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/img_test2_probs.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/img_test2_probs.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/AtoB_n02381460_510.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/AtoB_n02381460_510.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/kaffe/tensorflow/__init__.py: -------------------------------------------------------------------------------- 1 | from .transformer import TensorFlowTransformer 2 | from .network import Network 3 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/bag.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/bag.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/ball.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/ball.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/bar.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/bar.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/base.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/base.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/bed.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/bed.jpg 
-------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/bike.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/bike.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/boat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/boat.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/book.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/book.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/box.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/box.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/bus.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/bus.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/car.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/car.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/case.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/case.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/desk.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/desk.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/door.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/door.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/fan.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/fan.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/flag.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/flag.jpg 
-------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/food.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/food.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/hill.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/hill.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/hood.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/hood.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/lake.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/lake.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/lamp.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/lamp.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/land.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/land.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/oven.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/oven.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/palm.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/palm.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/path.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/path.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/pier.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/pier.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/pole.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/pole.jpg 
-------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/pot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/pot.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/road.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/road.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/rock.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/rock.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/rug.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/rug.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/sand.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/sand.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/sea.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/sea.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/seat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/seat.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/ship.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/ship.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/sink.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/sink.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/sky.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/sky.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/sofa.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/sofa.jpg 
-------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/step.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/step.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/tank.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/tank.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/tent.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/tent.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/tray.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/tray.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/tree.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/tree.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/van.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/van.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/vase.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/vase.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/wall.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/wall.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/weights/keras/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/weights/keras/.DS_Store -------------------------------------------------------------------------------- /process_all/images/test_mask_voc_seg_read.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/process_all/images/test_mask_voc_seg_read.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/AtoB_n02381460_4530.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/AtoB_n02381460_4530.jpg 
-------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/AtoB_n02381460_4660.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/AtoB_n02381460_4660.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/AtoB_n02381460_8980.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/AtoB_n02381460_8980.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/BtoA_n02391049_1760.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/BtoA_n02391049_1760.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/BtoA_n02391049_3070.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/BtoA_n02391049_3070.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/BtoA_n02391049_5100.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/BtoA_n02391049_5100.jpg -------------------------------------------------------------------------------- /CycleGAN-tensorflow/imgs/BtoA_n02391049_7150.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/CycleGAN-tensorflow/imgs/BtoA_n02391049_7150.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/example_images/ade20k.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/example_images/ade20k.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/img_test2_seg_blended.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/img_test2_seg_blended.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/animal.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/animal.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/ashcan.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/ashcan.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/awning.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/awning.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/barrel.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/barrel.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/basket.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/basket.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/bench.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/bench.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/blind.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/blind.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/booth.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/booth.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/bottle.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/bottle.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/bridge.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/bridge.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/buffet.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/buffet.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/canopy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/canopy.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/chair.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/chair.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/clock.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/clock.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/column.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/column.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/cradle.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/cradle.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/earth.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/earth.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/fence.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/fence.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/field.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/field.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/floor.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/floor.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/flower.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/flower.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/glass.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/glass.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/grass.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/grass.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/house.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/house.jpg -------------------------------------------------------------------------------- 
/PSPNet-Keras-tensorflow/utils/color150/hovel.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/hovel.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/light.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/light.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/mirror.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/mirror.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/person.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/person.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/pillow.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/pillow.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/plant.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/plant.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/plate.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/plate.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/poster.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/poster.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/river.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/river.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/runway.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/runway.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/sconce.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/sconce.jpg 
-------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/screen.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/screen.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/shelf.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/shelf.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/shower.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/shower.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/stage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/stage.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/stairs.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/stairs.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/stool.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/stool.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/stove.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/stove.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/table.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/table.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/toilet.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/toilet.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/towel.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/towel.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/tower.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/tower.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/truck.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/truck.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/washer.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/washer.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/water.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/water.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/airplane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/airplane.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/apparel.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/apparel.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/armchair.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/armchair.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/bannister.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/bannister.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/bathtub.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/bathtub.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/bicycle.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/bicycle.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/blanket.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/blanket.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/bookcase.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/bookcase.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/building.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/building.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/cabinet.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/cabinet.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/ceiling.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/ceiling.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/computer.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/computer.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/counter.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/counter.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/curtain.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/curtain.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/cushion.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/cushion.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/escalator.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/escalator.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/fireplace.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/fireplace.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/fountain.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/fountain.jpg 
-------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/furniture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/furniture.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/houseware.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/houseware.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/microwave.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/microwave.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/minibike.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/minibike.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/monitor.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/monitor.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/mountain.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/mountain.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/ottoman.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/ottoman.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/painting.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/painting.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/plaything.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/plaything.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/radiator.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/radiator.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/railing.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/railing.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/sculpture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/sculpture.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/sidewalk.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/sidewalk.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/signboard.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/signboard.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/stairway.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/stairway.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/wardrobe.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/wardrobe.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/waterfall.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/waterfall.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/example_images/pascal_voc.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/example_images/pascal_voc.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/example_results/ade20k_seg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/example_results/ade20k_seg.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/chandelier.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/chandelier.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/countertop.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/countertop.jpg -------------------------------------------------------------------------------- 
/PSPNet-Keras-tensorflow/utils/color150/crt screen.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/crt screen.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/dirt track.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/dirt track.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/dishwasher.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/dishwasher.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/grandstand.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/grandstand.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/pool table.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/pool table.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/screen door.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/screen door.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/skyscraper.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/skyscraper.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/streetlight.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/streetlight.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/trade name.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/trade name.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/windowpane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/windowpane.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/example_images/test_pycaffe.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/example_images/test_pycaffe.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/example_results/ade20k_probs.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/example_results/ade20k_probs.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/arcade machine.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/arcade machine.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/bulletin board.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/bulletin board.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/coffee table.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/coffee table.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/conveyer belt.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/conveyer belt.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/kitchen island.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/kitchen island.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/refrigerator.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/refrigerator.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/swimming pool.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/swimming pool.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/swivel chair.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/swivel chair.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/traffic light.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/traffic light.jpg 
-------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/colorization/color150.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/colorization/color150.mat -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/example_results/cityscapes_seg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/example_results/cityscapes_seg.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/example_results/pascal_voc_seg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/example_results/pascal_voc_seg.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/chest of drawers.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/chest of drawers.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/example_results/ade20k_seg_blended.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/example_results/ade20k_seg_blended.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/example_results/ade20k_seg_pycaffe.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/example_results/ade20k_seg_pycaffe.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/example_results/cityscapes_probs.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/example_results/cityscapes_probs.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/example_results/pascal_voc_probs.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/example_results/pascal_voc_probs.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/color150/television receiver.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/color150/television receiver.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/utils/colorization/objectName150.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/utils/colorization/objectName150.mat 
-------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/kaffe/__init__.py: -------------------------------------------------------------------------------- 1 | from .graph import GraphBuilder, NodeMapper 2 | from .errors import KaffeError, print_stderr 3 | 4 | from . import tensorflow 5 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/kaffe/errors.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | class KaffeError(Exception): 4 | pass 5 | 6 | def print_stderr(msg): 7 | sys.stderr.write('%s\n' % msg) 8 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/example_results/cityscapes_seg_blended.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/example_results/cityscapes_seg_blended.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/example_results/pascal_voc_seg_blended.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/randomnoisevlad/persoml/HEAD/PSPNet-Keras-tensorflow/example_results/pascal_voc_seg_blended.jpg -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/.gitignore: -------------------------------------------------------------------------------- 1 | # OS X temporary metadata 2 | ._* 3 | *.DS_Store 4 | 5 | # Extracted parameters 6 | *.params 7 | 8 | # Python cache 9 | *.pyc 10 | 11 | -------------------------------------------------------------------------------- /CycleGAN-tensorflow/resize.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import cv2 3 | 4 | files = glob.glob('test/*.jpg') + glob.glob('test/*.png') 5 | 6 | for f in files: 7 | img = cv2.imread(f) 8 | img = cv2.resize(img, (150, 225)) 9 | cv2.imwrite(f, img) 10 | -------------------------------------------------------------------------------- /params.py: -------------------------------------------------------------------------------- 1 | MODIFIER = 1 2 | 3 | IMAGE_W = 150 * MODIFIER 4 | IMAGE_H = 225 * MODIFIER 5 | 6 | 7 | MEAN_WIDTH = 65 * MODIFIER 8 | HEAD_W = int(MEAN_WIDTH // 2.5) 9 | 10 | 11 | HEAD_X = (IMAGE_W - MEAN_WIDTH) // 2 12 | HEAD_Y = 3 * MODIFIER 13 | 14 | W_INCREASE = 5 * MODIFIER 15 | 16 | MODEL_INPUT_SIZE = (IMAGE_H, IMAGE_W, 3) 17 | MASK_SIZE = (IMAGE_H, IMAGE_W, 1) 18 | 19 | MORPH = 5 * MODIFIER 20 | ALPHA = 0.7 21 | 22 | CUM_REDUCE = 0.05 23 | CUM_INCREASE = 0.15 24 | 25 | INPAINT_INCREASE = 2 * MODIFIER -------------------------------------------------------------------------------- /process_all/params.py: -------------------------------------------------------------------------------- 1 | MODIFIER = 1 2 | 3 | IMAGE_W = 150 * MODIFIER 4 | IMAGE_H = 225 * MODIFIER 5 | 6 | 7 | MEAN_WIDTH = 65 * MODIFIER 8 | HEAD_W = int(MEAN_WIDTH // 2.5) 9 | 10 | 11 | HEAD_X = (IMAGE_W - MEAN_WIDTH) // 2 12 | HEAD_Y = 3 * MODIFIER 13 | 14 | W_INCREASE = 5 * MODIFIER 15 | 16 | MODEL_INPUT_SIZE = (IMAGE_H, IMAGE_W, 3) 17 | MASK_SIZE = (IMAGE_H, IMAGE_W, 1) 18 | 19 | MORPH = 5 * MODIFIER 20 | ALPHA = 0.7 21 | 22 | CUM_REDUCE = 0.05 23 | CUM_INCREASE = 0.15 24 | 25 | INPAINT_INCREASE = 2 * MODIFIER 
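For orientation, a minimal sketch of how the geometry constants in `params.py` above might be combined is shown below. This is an editorial illustration, not a file from this repository; the helper `head_box` and the assumption that `HEAD_W` serves as both the width and height of the head region are hypothetical.

```python
# Editorial illustration only -- not a file from this repository.
# Uses the params.py constants above with MODIFIER = 1, and assumes
# HEAD_W is used as both the width and the height of the head region.
MODIFIER = 1
IMAGE_W = 150 * MODIFIER
IMAGE_H = 225 * MODIFIER
MEAN_WIDTH = 65 * MODIFIER
HEAD_W = int(MEAN_WIDTH // 2.5)       # 26
HEAD_X = (IMAGE_W - MEAN_WIDTH) // 2  # 42
HEAD_Y = 3 * MODIFIER                 # 3

def head_box():
    """Return a hypothetical (x0, y0, x1, y1) head rectangle inside the 150x225 frame."""
    return (HEAD_X, HEAD_Y, HEAD_X + HEAD_W, HEAD_Y + HEAD_W)

print(head_box())  # (42, 3, 68, 29)
```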
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # personml 2 | 3 | Project for an undressing CycleGAN. You can test how it works using the Telegram bot __@tunboxingbot__. 4 | 5 | A description in Russian can be found [here](https://neurohive.io/projects/nsfw-images-generation-dressing-undressing/) 6 | 7 | ![cycleboxing](https://i.imgur.com/Px6yhw7.png) 8 | 9 | 10 | 11 | The code is quite messy due to the limited amount of time; it will be cleaned up later. 12 | 13 | ### Contains code: 14 | 15 | -- PSP-Net; see the original repository for details 16 | 17 | -- CycleGAN; use the weights from [here](https://yadi.sk/d/pzvtwfwF3WJEDQ) 18 | 19 | -- Code for image preprocessing/postprocessing 20 | 21 | -- Code for the server and Telegram bot 22 | 23 | ### Dataset: 24 | -- The training dataset can be downloaded [here](https://yadi.sk/d/hW7nDfaI3WJDLk) 25 | 26 | 27 | -------------------------------------------------------------------------------- /CycleGAN-tensorflow/download_dataset.sh: -------------------------------------------------------------------------------- 1 | mkdir datasets 2 | FILE=$1 3 | 4 | if [[ $FILE != "ae_photos" && $FILE != "apple2orange" && $FILE != "summer2winter_yosemite" && $FILE != "horse2zebra" && $FILE != "monet2photo" && $FILE != "cezanne2photo" && $FILE != "ukiyoe2photo" && $FILE != "vangogh2photo" && $FILE != "maps" && $FILE != "cityscapes" && $FILE != "facades" && $FILE != "iphone2dslr_flower" && $FILE != "ae_photos" ]]; then 5 | echo "Available datasets are: apple2orange, summer2winter_yosemite, horse2zebra, monet2photo, cezanne2photo, ukiyoe2photo, vangogh2photo, maps, cityscapes, facades, iphone2dslr_flower, ae_photos" 6 | exit 1 7 | fi 8 | 9 | URL=https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/$FILE.zip 10 | ZIP_FILE=./datasets/$FILE.zip 11 | TARGET_DIR=./datasets/$FILE/ 12 | wget -N $URL -O $ZIP_FILE 13 | mkdir $TARGET_DIR 14 | unzip $ZIP_FILE -d ./datasets/ 15 | rm $ZIP_FILE 16 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/examples/imagenet/models/alexnet.py: -------------------------------------------------------------------------------- 1 | from kaffe.tensorflow import Network 2 | 3 | class AlexNet(Network): 4 | def setup(self): 5 | (self.feed('data') 6 | .conv(11, 11, 96, 4, 4, padding='VALID', name='conv1') 7 | .lrn(2, 2e-05, 0.75, name='norm1') 8 | .max_pool(3, 3, 2, 2, padding='VALID', name='pool1') 9 | .conv(5, 5, 256, 1, 1, group=2, name='conv2') 10 | .lrn(2, 2e-05, 0.75, name='norm2') 11 | .max_pool(3, 3, 2, 2, padding='VALID', name='pool2') 12 | .conv(3, 3, 384, 1, 1, name='conv3') 13 | .conv(3, 3, 384, 1, 1, group=2, name='conv4') 14 | .conv(3, 3, 256, 1, 1, group=2, name='conv5') 15 | .max_pool(3, 3, 2, 2, padding='VALID', name='pool5') 16 | .fc(4096, name='fc6') 17 | .fc(4096, name='fc7') 18 | .fc(1000, relu=False, name='fc8') 19 | .softmax(name='prob')) 20 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/examples/imagenet/models/caffenet.py: -------------------------------------------------------------------------------- 1 | from kaffe.tensorflow import Network 2 | 3 | class CaffeNet(Network): 4 | def setup(self): 5 | (self.feed('data') 6 | .conv(11, 11, 96, 4, 4, padding='VALID', name='conv1') 7 | .max_pool(3, 3, 2, 2, padding='VALID', name='pool1') 8 | .lrn(2, 2e-05, 0.75,
name='norm1') 9 | .conv(5, 5, 256, 1, 1, group=2, name='conv2') 10 | .max_pool(3, 3, 2, 2, padding='VALID', name='pool2') 11 | .lrn(2, 2e-05, 0.75, name='norm2') 12 | .conv(3, 3, 384, 1, 1, name='conv3') 13 | .conv(3, 3, 384, 1, 1, group=2, name='conv4') 14 | .conv(3, 3, 256, 1, 1, group=2, name='conv5') 15 | .max_pool(3, 3, 2, 2, padding='VALID', name='pool5') 16 | .fc(4096, name='fc6') 17 | .fc(4096, name='fc7') 18 | .fc(1000, relu=False, name='fc8') 19 | .softmax(name='prob')) 20 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/.pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | ignore=caffepb.py 3 | 4 | [MESSAGES CONTROL] 5 | disable=missing-docstring,invalid-name,wildcard-import,unused-wildcard-import,bad-builtin,no-self-use,locally-disabled 6 | 7 | [MISCELLANEOUS] 8 | # Exclude TODOs 9 | notes= 10 | 11 | [TYPECHECK] 12 | ignored-classes=numpy,cv2,NodeKind,LayerType,NetParameter,NpzFile 13 | 14 | [DESIGN] 15 | # Maximum number of arguments for function / method 16 | max-args=20 17 | # Maximum number of locals for function / method body 18 | max-locals=30 19 | # Maximum number of return / yield for function / method body 20 | max-returns=10 21 | # Maximum number of branch for function / method body 22 | max-branches=12 23 | # Maximum number of statements in function / method body 24 | max-statements=200 25 | # Maximum number of attributes for a class (see R0902). 26 | max-attributes=100 27 | # Maximum number of public methods for a class (see R0904). 28 | max-public-methods=200 29 | # Maximum number of boolean expressions in a if statement 30 | max-bool-expr=10 31 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 VladKry 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/examples/imagenet/models/nin.py: -------------------------------------------------------------------------------- 1 | from kaffe.tensorflow import Network 2 | 3 | class NiN(Network): 4 | def setup(self): 5 | (self.feed('data') 6 | .conv(11, 11, 96, 4, 4, padding='VALID', name='conv1') 7 | .conv(1, 1, 96, 1, 1, name='cccp1') 8 | .conv(1, 1, 96, 1, 1, name='cccp2') 9 | .max_pool(3, 3, 2, 2, name='pool1') 10 | .conv(5, 5, 256, 1, 1, name='conv2') 11 | .conv(1, 1, 256, 1, 1, name='cccp3') 12 | .conv(1, 1, 256, 1, 1, name='cccp4') 13 | .max_pool(3, 3, 2, 2, padding='VALID', name='pool2') 14 | .conv(3, 3, 384, 1, 1, name='conv3') 15 | .conv(1, 1, 384, 1, 1, name='cccp5') 16 | .conv(1, 1, 384, 1, 1, name='cccp6') 17 | .max_pool(3, 3, 2, 2, padding='VALID', name='pool3') 18 | .conv(3, 3, 1024, 1, 1, name='conv4-1024') 19 | .conv(1, 1, 1024, 1, 1, name='cccp7-1024') 20 | .conv(1, 1, 1000, 1, 1, name='cccp8-1024') 21 | .avg_pool(6, 6, 1, 1, padding='VALID', name='pool4') 22 | .softmax(name='prob')) 23 | -------------------------------------------------------------------------------- /python_utils/callbacks.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import keras.backend as K 4 | from keras.callbacks import Callback, TensorBoard, ReduceLROnPlateau, ModelCheckpoint 5 | 6 | class LrReducer(Callback): 7 | def __init__(self, base_lr = 0.01, max_epoch = 150, power=0.9, verbose=1): 8 | super(Callback, self).__init__() 9 | self.max_epoch = max_epoch 10 | self.power = power 11 | self.verbose = verbose 12 | self.base_lr = base_lr 13 | 14 | def on_epoch_end(self, epoch, logs={}): 15 | lr_now = K.get_value(self.model.optimizer.lr) 16 | new_lr = max(0.00001, min(self.base_lr * (1 - epoch / float(self.max_epoch))**self.power, lr_now)) 17 | K.set_value(self.model.optimizer.lr, new_lr) 18 | if self.verbose: 19 | print(" - learning rate: %10f" % (new_lr)) 20 | 21 | def callbacks(logdir): 22 | model_checkpoint = ModelCheckpoint("weights_train/weights.{epoch:02d}-{loss:.2f}.h5", monitor='loss', verbose=1, period=10) 23 | tensorboard_callback = TensorBoard(log_dir=logdir, write_graph=True, write_images=True, histogram_freq=1) 24 | plateau_callback = ReduceLROnPlateau(monitor='loss', factor=0.99, verbose=1, patience=0, min_lr=0.00001) 25 | #return [CheckPoints(), tensorboard_callback, LrReducer()] 26 | return [model_checkpoint, tensorboard_callback, plateau_callback, LrReducer()] 27 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/examples/imagenet/models/vgg.py: -------------------------------------------------------------------------------- 1 | from kaffe.tensorflow import Network 2 | 3 | class VGG16(Network): 4 | def setup(self): 5 | (self.feed('data') 6 | .conv(3, 3, 64, 1, 1, name='conv1_1') 7 | .conv(3, 3, 64, 1, 1, name='conv1_2') 8 | .max_pool(2, 2, 2, 2, name='pool1') 9 | .conv(3, 3, 128, 1, 1, name='conv2_1') 10 | .conv(3, 3, 128, 1, 1, name='conv2_2') 11 | .max_pool(2, 2, 2, 2, name='pool2') 12 | .conv(3, 3, 256, 1, 1, name='conv3_1') 13 | .conv(3, 3, 256, 1, 1, name='conv3_2') 14 | .conv(3, 3, 256, 1, 1, name='conv3_3') 15 | .max_pool(2, 2, 2, 2, name='pool3') 16 | .conv(3, 3, 512, 1, 1, name='conv4_1') 17 | .conv(3, 3, 512, 1, 1, name='conv4_2') 18 | .conv(3, 3, 512, 1, 1, name='conv4_3') 19 | .max_pool(2, 2, 2, 2, name='pool4') 20 | 
.conv(3, 3, 512, 1, 1, name='conv5_1') 21 | .conv(3, 3, 512, 1, 1, name='conv5_2') 22 | .conv(3, 3, 512, 1, 1, name='conv5_3') 23 | .max_pool(2, 2, 2, 2, name='pool5') 24 | .fc(4096, name='fc6') 25 | .fc(4096, name='fc7') 26 | .fc(1000, relu=False, name='fc8') 27 | .softmax(name='prob')) 28 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/python_utils/callbacks.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import keras.backend as K 4 | from keras.callbacks import Callback, TensorBoard, ReduceLROnPlateau, ModelCheckpoint 5 | 6 | class LrReducer(Callback): 7 | def __init__(self, base_lr = 0.01, max_epoch = 150, power=0.9, verbose=1): 8 | super(Callback, self).__init__() 9 | self.max_epoch = max_epoch 10 | self.power = power 11 | self.verbose = verbose 12 | self.base_lr = base_lr 13 | 14 | def on_epoch_end(self, epoch, logs={}): 15 | lr_now = K.get_value(self.model.optimizer.lr) 16 | new_lr = max(0.00001, min(self.base_lr * (1 - epoch / float(self.max_epoch))**self.power, lr_now)) 17 | K.set_value(self.model.optimizer.lr, new_lr) 18 | if self.verbose: 19 | print(" - learning rate: %10f" % (new_lr)) 20 | 21 | def callbacks(logdir): 22 | model_checkpoint = ModelCheckpoint("weights_train/weights.{epoch:02d}-{loss:.2f}.h5", monitor='loss', verbose=1, period=10) 23 | tensorboard_callback = TensorBoard(log_dir=logdir, write_graph=True, write_images=True, histogram_freq=1) 24 | plateau_callback = ReduceLROnPlateau(monitor='loss', factor=0.99, verbose=1, patience=0, min_lr=0.00001) 25 | #return [CheckPoints(), tensorboard_callback, LrReducer()] 26 | return [model_checkpoint, tensorboard_callback, plateau_callback, LrReducer()] 27 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/examples/mnist/README.md: -------------------------------------------------------------------------------- 1 | ### LeNet Example 2 | 3 | _Thanks to @Russell91 for this example_ 4 | 5 | This example shows you how to finetune code from the [Caffe MNIST tutorial](http://caffe.berkeleyvision.org/gathered/examples/mnist.html) using Tensorflow. 6 | First, you can convert a prototxt model to tensorflow code: 7 | 8 | $ ./convert.py examples/mnist/lenet.prototxt --code-output-path=mynet.py 9 | 10 | This produces tensorflow code for the LeNet network in `mynet.py`. The code can be imported as described below in the Inference section. Caffe-tensorflow also lets you convert `.caffemodel` weight files to `.npy` files that can be directly loaded from tensorflow: 11 | 12 | $ ./convert.py examples/mnist/lenet.prototxt --caffemodel examples/mnist/lenet_iter_10000.caffemodel --data-output-path=mynet.npy 13 | 14 | The above command will generate a weight file named `mynet.npy`. 15 | 16 | #### Inference: 17 | 18 | Once you have generated both the code and weight files for LeNet, you can finetune LeNet using tensorflow with 19 | 20 | $ ./examples/mnist/finetune_mnist.py 21 | 22 | At a high level, `finetune_mnist.py` works as follows: 23 | 24 | ```python 25 | # Import the converted model's class 26 | from mynet import MyNet 27 | 28 | # Create an instance, passing in the input data 29 | net = MyNet({'data':my_input_data}) 30 | 31 | with tf.Session() as sesh: 32 | # Load the data 33 | net.load('mynet.npy', sesh) 34 | # Forward pass 35 | output = sesh.run(net.get_output(), ...)
36 | ``` 37 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/LICENSE.md: -------------------------------------------------------------------------------- 1 | # License 2 | 3 | Each contributor holds copyright over their contributions to Caffe-Tensorflow. In particular: 4 | 5 | - Any included network model is provided under its original license. 6 | 7 | - Any portion derived from Caffe is provided under its original license. 8 | 9 | - Caffe-tensorflow is provided under the MIT license, as specified below. 10 | 11 | # The MIT License (MIT) 12 | 13 | Copyright (c) 2016 Saumitro Dasgupta 14 | 15 | Permission is hereby granted, free of charge, to any person obtaining a copy 16 | of this software and associated documentation files (the "Software"), to deal 17 | in the Software without restriction, including without limitation the rights 18 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 19 | copies of the Software, and to permit persons to whom the Software is 20 | furnished to do so, subject to the following conditions: 21 | 22 | The above copyright notice and this permission notice shall be included in all 23 | copies or substantial portions of the Software. 24 | 25 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 26 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 27 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 28 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 29 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 30 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 | SOFTWARE. 32 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/kaffe/caffe/resolver.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | SHARED_CAFFE_RESOLVER = None 4 | 5 | class CaffeResolver(object): 6 | def __init__(self): 7 | self.import_caffe() 8 | 9 | def import_caffe(self): 10 | self.caffe = None 11 | try: 12 | # Try to import PyCaffe first 13 | import caffe 14 | self.caffe = caffe 15 | except ImportError: 16 | # Fall back to the protobuf implementation 17 | from . import caffepb 18 | self.caffepb = caffepb 19 | show_fallback_warning() 20 | if self.caffe: 21 | # Use the protobuf code from the imported distribution. 22 | # This way, Caffe variants with custom layers will work. 23 | self.caffepb = self.caffe.proto.caffe_pb2 24 | self.NetParameter = self.caffepb.NetParameter 25 | 26 | def has_pycaffe(self): 27 | return self.caffe is not None 28 | 29 | def get_caffe_resolver(): 30 | global SHARED_CAFFE_RESOLVER 31 | if SHARED_CAFFE_RESOLVER is None: 32 | SHARED_CAFFE_RESOLVER = CaffeResolver() 33 | return SHARED_CAFFE_RESOLVER 34 | 35 | def has_pycaffe(): 36 | return get_caffe_resolver().has_pycaffe() 37 | 38 | def show_fallback_warning(): 39 | msg = ''' 40 | ------------------------------------------------------------ 41 | WARNING: PyCaffe not found! 42 | Falling back to a pure protocol buffer implementation. 43 | * Conversions will be drastically slower. 44 | * This backend is UNTESTED! 
45 | ------------------------------------------------------------ 46 | 47 | ''' 48 | sys.stderr.write(msg) 49 | -------------------------------------------------------------------------------- /weight_converter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import print_function 4 | 5 | import sys 6 | from os.path import splitext 7 | import numpy as np 8 | 9 | import caffe 10 | 11 | # Not needed because Tensorflow and Caffe do convolution the same way 12 | # Needed for conversion to Theano 13 | 14 | 15 | def rot90(W): 16 | for i in range(W.shape[0]): 17 | for j in range(W.shape[1]): 18 | W[i, j] = np.rot90(W[i, j], 2) 19 | return W 20 | 21 | 22 | weights = {} 23 | assert "prototxt" in splitext( 24 | sys.argv[1])[1], "First argument must be caffe prototxt %s" % sys.argv[1] 25 | assert "caffemodel" in splitext( 26 | sys.argv[2])[1], "Second argument must be caffe weights %s" % sys.argv[2] 27 | net = caffe.Net(sys.argv[1], sys.argv[2], caffe.TEST) 28 | for k, v in net.params.items(): 29 | print("Layer %s, has %d params." % (k, len(v))) 30 | if len(v) == 1: 31 | W = v[0].data[...] 32 | W = np.transpose(W, (2, 3, 1, 0)) 33 | weights[k] = {"weights": W} 34 | elif len(v) == 2: 35 | W = v[0].data[...] 36 | W = np.transpose(W, (2, 3, 1, 0)) 37 | b = v[1].data[...] 38 | weights[k] = {"weights": W, "biases": b} 39 | elif len(v) == 4: # Batchnorm layer 40 | k = k.replace('/', '_') 41 | scale = v[0].data[...] 42 | offset = v[1].data[...] 43 | mean = v[2].data[...] 44 | variance = v[3].data[...] 45 | 46 | weights[k] = {"mean": mean, "variance": variance, 47 | "scale": scale, "offset": offset} 48 | 49 | else: 50 | print("Undefined layer") 51 | exit() 52 | 53 | arr = np.asarray(weights) 54 | weights_name = splitext(sys.argv[2])[0] + ".npy" 55 | np.save(weights_name.lower(), arr) 56 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/weight_converter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import print_function 4 | 5 | import sys 6 | from os.path import splitext 7 | import numpy as np 8 | 9 | import caffe 10 | 11 | # Not needed because Tensorflow and Caffe do convolution the same way 12 | # Needed for conversion to Theano 13 | 14 | 15 | def rot90(W): 16 | for i in range(W.shape[0]): 17 | for j in range(W.shape[1]): 18 | W[i, j] = np.rot90(W[i, j], 2) 19 | return W 20 | 21 | 22 | weights = {} 23 | assert "prototxt" in splitext( 24 | sys.argv[1])[1], "First argument must be caffe prototxt %s" % sys.argv[1] 25 | assert "caffemodel" in splitext( 26 | sys.argv[2])[1], "Second argument must be caffe weights %s" % sys.argv[2] 27 | net = caffe.Net(sys.argv[1], sys.argv[2], caffe.TEST) 28 | for k, v in net.params.items(): 29 | print("Layer %s, has %d params." % (k, len(v))) 30 | if len(v) == 1: 31 | W = v[0].data[...] 32 | W = np.transpose(W, (2, 3, 1, 0)) 33 | weights[k] = {"weights": W} 34 | elif len(v) == 2: 35 | W = v[0].data[...] 36 | W = np.transpose(W, (2, 3, 1, 0)) 37 | b = v[1].data[...] 38 | weights[k] = {"weights": W, "biases": b} 39 | elif len(v) == 4: # Batchnorm layer 40 | k = k.replace('/', '_') 41 | scale = v[0].data[...] 42 | offset = v[1].data[...] 43 | mean = v[2].data[...] 44 | variance = v[3].data[...] 
45 | 46 | weights[k] = {"mean": mean, "variance": variance, 47 | "scale": scale, "offset": offset} 48 | 49 | else: 50 | print("Undefined layer") 51 | exit() 52 | 53 | arr = np.asarray(weights) 54 | weights_name = splitext(sys.argv[2])[0] + ".npy" 55 | np.save(weights_name.lower(), arr) 56 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | *.zip 9 | 10 | # Distribution / packaging 11 | .Python 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | temp/ 30 | loaded_images/ 31 | tits2tits_masked/ 32 | __MACOSX/ 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | .hypothesis/ 54 | .pytest_cache/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | db.sqlite3 64 | 65 | # Flask stuff: 66 | instance/ 67 | .webassets-cache 68 | 69 | # Scrapy stuff: 70 | .scrapy 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # pyenv 82 | .python-version 83 | 84 | # celery beat schedule file 85 | celerybeat-schedule 86 | 87 | # SageMath parsed files 88 | *.sage.py 89 | 90 | # Environments 91 | .env 92 | .venv 93 | env/ 94 | venv/ 95 | ENV/ 96 | env.bak/ 97 | venv.bak/ 98 | 99 | # Spyder project settings 100 | .spyderproject 101 | .spyproject 102 | 103 | # Rope project settings 104 | .ropeproject 105 | 106 | # mkdocs documentation 107 | /site 108 | 109 | # mypy 110 | .mypy_cache/ 111 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/examples/imagenet/README.md: -------------------------------------------------------------------------------- 1 | # ImageNet Examples 2 | 3 | This folder contains two examples that demonstrate how to use converted networks for 4 | image classification. Also included are sample converted models and helper scripts. 5 | 6 | ## 1. Image Classification 7 | 8 | `classify.py` uses a GoogleNet trained on ImageNet, converted to TensorFlow, for classifying images. 9 | 10 | The architecture used is defined in `models/googlenet.py` (which was auto-generated). You will need 11 | to download and convert the weights from Caffe to run the example. The download link for the 12 | corresponding weights can be found in Caffe's `models/bvlc_googlenet/` folder. 13 | 14 | You can run this example like so: 15 | 16 | $ ./classify.py /path/to/googlenet.npy ~/pics/kitty.png ~/pics/woof.jpg 17 | 18 | You should expect to see an output similar to this: 19 | 20 | Image Classified As Confidence 21 | ---------------------------------------------------------------------- 22 | kitty.png Persian cat 99.75 % 23 | woof.jpg Bernese mountain dog 82.02 % 24 | 25 | 26 | ## 2. 
ImageNet Validation 27 | 28 | `validate.py` evaluates a converted model against the ImageNet (ILSVRC12) validation set. To run 29 | this script, you will need a copy of the ImageNet validation set. You can run it as follows: 30 | 31 | $ ./validate.py alexnet.npy val.txt imagenet-val/ --model AlexNet 32 | 33 | The validation results specified in the main readme were generated using this script. 34 | 35 | ## Helper Scripts 36 | 37 | In addition to the examples above, this folder includes a few additional files: 38 | 39 | - `dataset.py` : helper script for loading, pre-processing, and iterating over images 40 | - `models/` : contains converted models (auto-generated) 41 | - `models/helper.py` : describes how the data should be preprocessed for each model 42 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/examples/mnist/finetune_mnist.py: -------------------------------------------------------------------------------- 1 | # Import the converted model's class 2 | import numpy as np 3 | import random 4 | import tensorflow as tf 5 | from tensorflow.examples.tutorials.mnist import input_data 6 | 7 | from mynet import LeNet as MyNet 8 | 9 | mnist = input_data.read_data_sets('MNIST_data', one_hot=True) 10 | batch_size = 32 11 | 12 | def gen_data(source): 13 | while True: 14 | indices = range(len(source.images)) 15 | random.shuffle(indices) 16 | for i in indices: 17 | image = np.reshape(source.images[i], (28, 28, 1)) 18 | label = source.labels[i] 19 | yield image, label 20 | 21 | def gen_data_batch(source): 22 | data_gen = gen_data(source) 23 | while True: 24 | image_batch = [] 25 | label_batch = [] 26 | for _ in range(batch_size): 27 | image, label = next(data_gen) 28 | image_batch.append(image) 29 | label_batch.append(label) 30 | yield np.array(image_batch), np.array(label_batch) 31 | 32 | 33 | images = tf.placeholder(tf.float32, [batch_size, 28, 28, 1]) 34 | labels = tf.placeholder(tf.float32, [batch_size, 10]) 35 | net = MyNet({'data': images}) 36 | 37 | ip2 = net.layers['ip2'] 38 | pred = tf.nn.softmax(ip2) 39 | 40 | loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(ip2, labels), 0) 41 | opt = tf.train.RMSPropOptimizer(0.001) 42 | train_op = opt.minimize(loss) 43 | 44 | with tf.Session() as sess: 45 | # Load the data 46 | sess.run(tf.initialize_all_variables()) 47 | net.load('mynet.npy', sess) 48 | 49 | data_gen = gen_data_batch(mnist.train) 50 | for i in range(1000): 51 | np_images, np_labels = next(data_gen) 52 | feed = {images: np_images, labels: np_labels} 53 | 54 | np_loss, np_pred, _ = sess.run([loss, pred, train_op], feed_dict=feed) 55 | if i % 10 == 0: 56 | print('Iteration: ', i, np_loss) 57 | -------------------------------------------------------------------------------- /ops.p: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | import tensorflow as tf 4 | import tensorflow.contrib.slim as slim 5 | from tensorflow.python.framework import ops 6 | 7 | from utilsc import * 8 | 9 | def batch_norm(x, name="batch_norm"): 10 | return tf.contrib.layers.batch_norm(x, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, scope=name) 11 | 12 | def instance_norm(input, name="instance_norm"): 13 | with tf.variable_scope(name): 14 | depth = input.get_shape()[3] 15 | scale = tf.get_variable("scale", [depth], initializer=tf.random_normal_initializer(1.0, 0.02, dtype=tf.float32)) 16 | offset = tf.get_variable("offset", [depth], 
initializer=tf.constant_initializer(0.0)) 17 | mean, variance = tf.nn.moments(input, axes=[1,2], keep_dims=True) 18 | epsilon = 1e-5 19 | inv = tf.rsqrt(variance + epsilon) 20 | normalized = (input-mean)*inv 21 | return scale*normalized + offset 22 | 23 | def conv2d(input_, output_dim, ks=4, s=2, stddev=0.02, padding='SAME', name="conv2d"): 24 | with tf.variable_scope(name): 25 | return slim.conv2d(input_, output_dim, ks, s, padding=padding, activation_fn=None, 26 | weights_initializer=tf.truncated_normal_initializer(stddev=stddev), 27 | biases_initializer=None) 28 | 29 | def deconv2d(input_, output_dim, ks=4, s=2, stddev=0.02, name="deconv2d"): 30 | with tf.variable_scope(name): 31 | return slim.conv2d_transpose(input_, output_dim, ks, s, padding='SAME', activation_fn=None, 32 | weights_initializer=tf.truncated_normal_initializer(stddev=stddev), 33 | biases_initializer=None) 34 | 35 | def lrelu(x, leak=0.2, name="lrelu"): 36 | return tf.maximum(x, leak*x) 37 | 38 | def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False): 39 | 40 | with tf.variable_scope(scope or "Linear"): 41 | matrix = tf.get_variable("Matrix", [input_.get_shape()[-1], output_size], tf.float32, 42 | tf.random_normal_initializer(stddev=stddev)) 43 | bias = tf.get_variable("bias", [output_size], 44 | initializer=tf.constant_initializer(bias_start)) 45 | if with_w: 46 | return tf.matmul(input_, matrix) + bias, matrix, bias 47 | else: 48 | return tf.matmul(input_, matrix) + bias 49 | -------------------------------------------------------------------------------- /ops.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | import tensorflow as tf 4 | import tensorflow.contrib.slim as slim 5 | from tensorflow.python.framework import ops 6 | 7 | from utilsc import * 8 | 9 | def batch_norm(x, name="batch_norm"): 10 | return tf.contrib.layers.batch_norm(x, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, scope=name) 11 | 12 | def instance_norm(input, name="instance_norm"): 13 | with tf.variable_scope(name): 14 | depth = input.get_shape()[3] 15 | scale = tf.get_variable("scale", [depth], initializer=tf.random_normal_initializer(1.0, 0.02, dtype=tf.float32)) 16 | offset = tf.get_variable("offset", [depth], initializer=tf.constant_initializer(0.0)) 17 | mean, variance = tf.nn.moments(input, axes=[1,2], keep_dims=True) 18 | epsilon = 1e-5 19 | inv = tf.rsqrt(variance + epsilon) 20 | normalized = (input-mean)*inv 21 | return scale*normalized + offset 22 | 23 | def conv2d(input_, output_dim, ks=4, s=2, stddev=0.02, padding='SAME', name="conv2d"): 24 | with tf.variable_scope(name): 25 | return slim.conv2d(input_, output_dim, ks, s, padding=padding, activation_fn=None, 26 | weights_initializer=tf.truncated_normal_initializer(stddev=stddev), 27 | biases_initializer=None) 28 | 29 | def deconv2d(input_, output_dim, ks=4, s=2, stddev=0.02, name="deconv2d"): 30 | with tf.variable_scope(name): 31 | return slim.conv2d_transpose(input_, output_dim, ks, s, padding='SAME', activation_fn=None, 32 | weights_initializer=tf.truncated_normal_initializer(stddev=stddev), 33 | biases_initializer=None) 34 | 35 | def lrelu(x, leak=0.2, name="lrelu"): 36 | return tf.maximum(x, leak*x) 37 | 38 | def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False): 39 | 40 | with tf.variable_scope(scope or "Linear"): 41 | matrix = tf.get_variable("Matrix", [input_.get_shape()[-1], output_size], tf.float32, 42 | 
tf.random_normal_initializer(stddev=stddev)) 43 | bias = tf.get_variable("bias", [output_size], 44 | initializer=tf.constant_initializer(bias_start)) 45 | if with_w: 46 | return tf.matmul(input_, matrix) + bias, matrix, bias 47 | else: 48 | return tf.matmul(input_, matrix) + bias 49 | -------------------------------------------------------------------------------- /CycleGAN-tensorflow/ops.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | import tensorflow as tf 4 | import tensorflow.contrib.slim as slim 5 | from tensorflow.python.framework import ops 6 | 7 | from utilsc import * 8 | 9 | def batch_norm(x, name="batch_norm"): 10 | return tf.contrib.layers.batch_norm(x, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, scope=name) 11 | 12 | def instance_norm(input, name="instance_norm"): 13 | with tf.variable_scope(name): 14 | depth = input.get_shape()[3] 15 | scale = tf.get_variable("scale", [depth], initializer=tf.random_normal_initializer(1.0, 0.02, dtype=tf.float32)) 16 | offset = tf.get_variable("offset", [depth], initializer=tf.constant_initializer(0.0)) 17 | mean, variance = tf.nn.moments(input, axes=[1,2], keep_dims=True) 18 | epsilon = 1e-5 19 | inv = tf.rsqrt(variance + epsilon) 20 | normalized = (input-mean)*inv 21 | return scale*normalized + offset 22 | 23 | def conv2d(input_, output_dim, ks=4, s=2, stddev=0.02, padding='SAME', name="conv2d"): 24 | with tf.variable_scope(name): 25 | return slim.conv2d(input_, output_dim, ks, s, padding=padding, activation_fn=None, 26 | weights_initializer=tf.truncated_normal_initializer(stddev=stddev), 27 | biases_initializer=None) 28 | 29 | def deconv2d(input_, output_dim, ks=4, s=2, stddev=0.02, name="deconv2d"): 30 | with tf.variable_scope(name): 31 | return slim.conv2d_transpose(input_, output_dim, ks, s, padding='SAME', activation_fn=None, 32 | weights_initializer=tf.truncated_normal_initializer(stddev=stddev), 33 | biases_initializer=None) 34 | 35 | def lrelu(x, leak=0.2, name="lrelu"): 36 | return tf.maximum(x, leak*x) 37 | 38 | def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False): 39 | 40 | with tf.variable_scope(scope or "Linear"): 41 | matrix = tf.get_variable("Matrix", [input_.get_shape()[-1], output_size], tf.float32, 42 | tf.random_normal_initializer(stddev=stddev)) 43 | bias = tf.get_variable("bias", [output_size], 44 | initializer=tf.constant_initializer(bias_start)) 45 | if with_w: 46 | return tf.matmul(input_, matrix) + bias, matrix, bias 47 | else: 48 | return tf.matmul(input_, matrix) + bias 49 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/convert.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import sys 5 | import numpy as np 6 | import argparse 7 | from kaffe import KaffeError, print_stderr 8 | from kaffe.tensorflow import TensorFlowTransformer 9 | 10 | 11 | def fatal_error(msg): 12 | print_stderr(msg) 13 | exit(-1) 14 | 15 | 16 | def validate_arguments(args): 17 | if (args.data_output_path is not None) and (args.caffemodel is None): 18 | fatal_error('No input data path provided.') 19 | if (args.caffemodel is not None) and (args.data_output_path is None): 20 | fatal_error('No output data path provided.') 21 | if (args.code_output_path is None) and (args.data_output_path is None): 22 | fatal_error('No output path specified.') 23 | 24 | 25 | 
def convert(def_path, caffemodel_path, data_output_path, code_output_path, phase): 26 | try: 27 | transformer = TensorFlowTransformer(def_path, caffemodel_path, phase=phase) 28 | print_stderr('Converting data...') 29 | if caffemodel_path is not None: 30 | data = transformer.transform_data() 31 | print_stderr('Saving data...') 32 | with open(data_output_path, 'wb') as data_out: 33 | np.save(data_out, data) 34 | if code_output_path: 35 | print_stderr('Saving source...') 36 | with open(code_output_path, 'wb') as src_out: 37 | src_out.write(transformer.transform_source()) 38 | print_stderr('Done.') 39 | except KaffeError as err: 40 | fatal_error('Error encountered: {}'.format(err)) 41 | 42 | 43 | def main(): 44 | parser = argparse.ArgumentParser() 45 | parser.add_argument('def_path', help='Model definition (.prototxt) path') 46 | parser.add_argument('--caffemodel', help='Model data (.caffemodel) path') 47 | parser.add_argument('--data-output-path', help='Converted data output path') 48 | parser.add_argument('--code-output-path', help='Save generated source to this path') 49 | parser.add_argument('-p', 50 | '--phase', 51 | default='test', 52 | help='The phase to convert: test (default) or train') 53 | args = parser.parse_args() 54 | validate_arguments(args) 55 | convert(args.def_path, args.caffemodel, args.data_output_path, args.code_output_path, 56 | args.phase) 57 | 58 | 59 | if __name__ == '__main__': 60 | main() 61 | -------------------------------------------------------------------------------- /python_utils/preprocessing.py: -------------------------------------------------------------------------------- 1 | import os 2 | import random 3 | import numpy as np 4 | from scipy.misc import imresize, imread 5 | from scipy.ndimage import zoom 6 | from collections import defaultdict 7 | 8 | DATA_MEAN = np.array([[[123.68, 116.779, 103.939]]]) 9 | 10 | def preprocess_img(img, input_shape): 11 | img = imresize(img, input_shape) 12 | img = img - DATA_MEAN 13 | img = img[:, :, ::-1] 14 | img.astype('float32') 15 | return img 16 | 17 | def update_inputs(batch_size = None, input_size = None, num_classes = None): 18 | return np.zeros([batch_size, input_size[0], input_size[1], 3]), \ 19 | np.zeros([batch_size, input_size[0], input_size[1], num_classes]) 20 | 21 | def data_generator_s31(datadir='', nb_classes = None, batch_size = None, input_size=None, separator='_', test_nmb=50): 22 | if not os.path.exists(datadir): 23 | print("ERROR!The folder is not exist") 24 | #listdir = os.listdir(datadir) 25 | data = defaultdict(dict) 26 | image_dir = os.path.join(datadir, "imgs") 27 | image_paths = os.listdir(image_dir) 28 | for image_path in image_paths: 29 | nmb = image_path.split(separator)[0] 30 | data[nmb]['image'] = image_path 31 | anno_dir = os.path.join(datadir, "maps_bordered") 32 | anno_paths = os.listdir(anno_dir) 33 | for anno_path in anno_paths: 34 | nmb = anno_path.split(separator)[0] 35 | data[nmb]['anno'] = anno_path 36 | values = data.values() 37 | random.shuffle(values) 38 | return generate(values[test_nmb:], nb_classes, batch_size, input_size, image_dir, anno_dir), \ 39 | generate(values[:test_nmb], nb_classes, batch_size, input_size, image_dir, anno_dir) 40 | 41 | def generate(values, nb_classes, batch_size, input_size, image_dir, anno_dir): 42 | while 1: 43 | random.shuffle(values) 44 | images, labels = update_inputs(batch_size=batch_size, 45 | input_size=input_size, num_classes=nb_classes) 46 | for i, d in enumerate(values): 47 | img = imresize(imread(os.path.join(image_dir, d['image']), 
mode='RGB'), input_size) 48 | y = imread(os.path.join(anno_dir, d['anno']), mode='L') 49 | h, w = input_size 50 | y = zoom(y, (1.*h/y.shape[0], 1.*w/y.shape[1]), order=1, prefilter=False) 51 | y = (np.arange(nb_classes) == y[:,:,None]).astype('float32') 52 | assert y.shape[2] == nb_classes 53 | images[i % batch_size] = img 54 | labels[i % batch_size] = y 55 | if (i + 1) % batch_size == 0: 56 | yield images, labels 57 | images, labels = update_inputs(batch_size=batch_size, 58 | input_size=input_size, num_classes=nb_classes) 59 | 60 | 61 | 62 | -------------------------------------------------------------------------------- /pascal_voc_labels.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # Pascal VOC labels 4 | # 5 | 6 | from collections import namedtuple 7 | import numpy as np 8 | 9 | Label = namedtuple('Label', [ 10 | 11 | 'name', 12 | 'id', 13 | 'color' 14 | ]) 15 | 16 | labels = [Label('background', 0, (0, 0, 0)), 17 | Label('aeroplane', 1, (128, 0, 0)), 18 | Label('bicycle', 2, (0, 128, 0)), 19 | Label('bird', 3, (128, 128, 0)), 20 | Label('boat', 4, (0, 0, 128)), 21 | Label('bottle', 5, (128, 0, 128)), 22 | Label('bus', 6, (0, 128, 128)), 23 | Label('car', 7, (128, 128, 128)), 24 | Label('cat', 8, (64, 0, 0)), 25 | Label('chair', 9, (192, 0, 0)), 26 | Label('cow', 10, (64, 128, 0)), 27 | Label('diningtable', 11, (192, 128, 0)), 28 | Label('dog', 12, (64, 0, 128)), 29 | Label('horse', 13, (192, 0, 128)), 30 | Label('motorbike', 14, (64, 128, 128)), 31 | Label('person', 15, (192, 128, 128)), 32 | Label('pottedplant', 16, (0, 64, 0)), 33 | Label('sheep', 17, (128, 64, 0)), 34 | Label('sofa', 18, (0, 192, 0)), 35 | Label('train', 19, (128, 192, 0)), 36 | Label('tvmonitor', 20, (0, 64, 128)), 37 | Label('void', 21, (128, 64, 12))] 38 | 39 | voc_id2label = {label.id: label for label in labels} 40 | 41 | 42 | def generate_color_map(N=256, normalized=False): 43 | """from https://gist.github.com/wllhf/a4533e0adebe57e3ed06d4b50c8419ae .""" 44 | def bitget(byteval, idx): 45 | return ((byteval & (1 << idx)) != 0) 46 | 47 | dtype = 'float32' if normalized else 'uint8' 48 | cmap = np.zeros((N, 3), dtype=dtype) 49 | for i in range(N): 50 | r = g = b = 0 51 | c = i 52 | for j in range(8): 53 | r = r | (bitget(c, 0) << 7 - j) 54 | g = g | (bitget(c, 1) << 7 - j) 55 | b = b | (bitget(c, 2) << 7 - j) 56 | c = c >> 3 57 | 58 | cmap[i] = np.array([r, g, b]) 59 | 60 | cmap = cmap / 255 if normalized else cmap 61 | return cmap 62 | 63 | 64 | def generate_voc_labels(): 65 | labels = ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 66 | 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor', 'void'] 67 | color_map = generate_color_map() 68 | for id, name in enumerate(labels): 69 | color = color_map[id] 70 | print("Label(\'%s\', %i, (%i, %i, %i))," % 71 | (name, id, color[0], color[1], color[2])) 72 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/python_utils/preprocessing.py: -------------------------------------------------------------------------------- 1 | import os 2 | import random 3 | import numpy as np 4 | from scipy.misc import imresize, imread 5 | from scipy.ndimage import zoom 6 | from collections import defaultdict 7 | 8 | DATA_MEAN = np.array([[[123.68, 116.779, 103.939]]]) 9 | 10 | def preprocess_img(img, input_shape): 11 | img = imresize(img, input_shape) 12 | img 
= img - DATA_MEAN 13 | img = img[:, :, ::-1] 14 | img.astype('float32') 15 | return img 16 | 17 | def update_inputs(batch_size = None, input_size = None, num_classes = None): 18 | return np.zeros([batch_size, input_size[0], input_size[1], 3]), \ 19 | np.zeros([batch_size, input_size[0], input_size[1], num_classes]) 20 | 21 | def data_generator_s31(datadir='', nb_classes = None, batch_size = None, input_size=None, separator='_', test_nmb=50): 22 | if not os.path.exists(datadir): 23 | print("ERROR!The folder is not exist") 24 | #listdir = os.listdir(datadir) 25 | data = defaultdict(dict) 26 | image_dir = os.path.join(datadir, "imgs") 27 | image_paths = os.listdir(image_dir) 28 | for image_path in image_paths: 29 | nmb = image_path.split(separator)[0] 30 | data[nmb]['image'] = image_path 31 | anno_dir = os.path.join(datadir, "maps_bordered") 32 | anno_paths = os.listdir(anno_dir) 33 | for anno_path in anno_paths: 34 | nmb = anno_path.split(separator)[0] 35 | data[nmb]['anno'] = anno_path 36 | values = data.values() 37 | random.shuffle(values) 38 | return generate(values[test_nmb:], nb_classes, batch_size, input_size, image_dir, anno_dir), \ 39 | generate(values[:test_nmb], nb_classes, batch_size, input_size, image_dir, anno_dir) 40 | 41 | def generate(values, nb_classes, batch_size, input_size, image_dir, anno_dir): 42 | while 1: 43 | random.shuffle(values) 44 | images, labels = update_inputs(batch_size=batch_size, 45 | input_size=input_size, num_classes=nb_classes) 46 | for i, d in enumerate(values): 47 | img = imresize(imread(os.path.join(image_dir, d['image']), mode='RGB'), input_size) 48 | y = imread(os.path.join(anno_dir, d['anno']), mode='L') 49 | h, w = input_size 50 | y = zoom(y, (1.*h/y.shape[0], 1.*w/y.shape[1]), order=1, prefilter=False) 51 | y = (np.arange(nb_classes) == y[:,:,None]).astype('float32') 52 | assert y.shape[2] == nb_classes 53 | images[i % batch_size] = img 54 | labels[i % batch_size] = y 55 | if (i + 1) % batch_size == 0: 56 | yield images, labels 57 | images, labels = update_inputs(batch_size=batch_size, 58 | input_size=input_size, num_classes=nb_classes) 59 | 60 | 61 | 62 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/pascal_voc_labels.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # Pascal VOC labels 4 | # 5 | 6 | from collections import namedtuple 7 | import numpy as np 8 | 9 | Label = namedtuple('Label', [ 10 | 11 | 'name', 12 | 'id', 13 | 'color' 14 | ]) 15 | 16 | labels = [Label('background', 0, (0, 0, 0)), 17 | Label('aeroplane', 1, (128, 0, 0)), 18 | Label('bicycle', 2, (0, 128, 0)), 19 | Label('bird', 3, (128, 128, 0)), 20 | Label('boat', 4, (0, 0, 128)), 21 | Label('bottle', 5, (128, 0, 128)), 22 | Label('bus', 6, (0, 128, 128)), 23 | Label('car', 7, (128, 128, 128)), 24 | Label('cat', 8, (64, 0, 0)), 25 | Label('chair', 9, (192, 0, 0)), 26 | Label('cow', 10, (64, 128, 0)), 27 | Label('diningtable', 11, (192, 128, 0)), 28 | Label('dog', 12, (64, 0, 128)), 29 | Label('horse', 13, (192, 0, 128)), 30 | Label('motorbike', 14, (64, 128, 128)), 31 | Label('person', 15, (192, 128, 128)), 32 | Label('pottedplant', 16, (0, 64, 0)), 33 | Label('sheep', 17, (128, 64, 0)), 34 | Label('sofa', 18, (0, 192, 0)), 35 | Label('train', 19, (128, 192, 0)), 36 | Label('tvmonitor', 20, (0, 64, 128)), 37 | Label('void', 21, (128, 64, 12))] 38 | 39 | voc_id2label = {label.id: label for label in labels} 40 | 41 | 42 | def generate_color_map(N=256, 
normalized=False): 43 | """from https://gist.github.com/wllhf/a4533e0adebe57e3ed06d4b50c8419ae .""" 44 | def bitget(byteval, idx): 45 | return ((byteval & (1 << idx)) != 0) 46 | 47 | dtype = 'float32' if normalized else 'uint8' 48 | cmap = np.zeros((N, 3), dtype=dtype) 49 | for i in range(N): 50 | r = g = b = 0 51 | c = i 52 | for j in range(8): 53 | r = r | (bitget(c, 0) << 7 - j) 54 | g = g | (bitget(c, 1) << 7 - j) 55 | b = b | (bitget(c, 2) << 7 - j) 56 | c = c >> 3 57 | 58 | cmap[i] = np.array([r, g, b]) 59 | 60 | cmap = cmap / 255 if normalized else cmap 61 | return cmap 62 | 63 | 64 | def generate_voc_labels(): 65 | labels = ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 66 | 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor', 'void'] 67 | color_map = generate_color_map() 68 | for id, name in enumerate(labels): 69 | color = color_map[id] 70 | print("Label(\'%s\', %i, (%i, %i, %i))," % 71 | (name, id, color[0], color[1], color[2])) 72 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/drawImage/drawModule.py: -------------------------------------------------------------------------------- 1 | from PIL import Image, ImageDraw 2 | import scipy.ndimage 3 | import scipy.io 4 | import numpy as np 5 | import time 6 | import copy 7 | 8 | 9 | class BaseDraw: 10 | def __init__(self, color150, objectNames, img, pred_size, predicted_classes): 11 | self.class_colors = scipy.io.loadmat(color150) 12 | self.class_names = scipy.io.loadmat(objectNames, struct_as_record=False) 13 | self.im = img 14 | self.pred_size = pred_size 15 | self.predicted_classes = copy.deepcopy(predicted_classes) 16 | self.original_W = self.im.size[0] 17 | self.original_H = self.im.size[1] 18 | 19 | self.output_W = self.original_W 20 | self.output_H = self.original_H 21 | 22 | 23 | def dumpArray(self, array, i): 24 | test = array*100 25 | test = Image.fromarray(test.astype('uint8')) 26 | test = test.convert("RGB") 27 | test.save('/home/vlad/oS_AI/'+str(i)+'t.jpg', "JPEG") 28 | 29 | def calculateResize(self): 30 | W_coef = float(self.original_W)/float(self.output_W) 31 | H_coef = float(self.original_H)/float(self.output_H) 32 | horiz_pad = 0 33 | vert_pad = 0 34 | if W_coef > H_coef: 35 | coef = W_coef 36 | horiz_pad = int((self.output_H - self.original_H/coef)/2) 37 | return [coef, horiz_pad, vert_pad] 38 | else: 39 | coef = H_coef 40 | vert_pad = int((self.output_W - self.original_W/coef)/2) 41 | return [coef, horiz_pad, vert_pad] 42 | 43 | 44 | def resizeToOutput(self, image, coef, h_pad, w_pad): 45 | image = image.resize((int(self.original_W/coef), int(self.original_H/coef)), resample=Image.BILINEAR) 46 | outputImage = Image.new("RGB",(self.output_W,self.output_H),(0,0,0)) 47 | outputImage.paste(image,(w_pad,h_pad)) 48 | return outputImage 49 | 50 | 51 | 52 | def drawSimpleSegment(self): 53 | 54 | #Drawing module 55 | im_Width, im_Height = self.pred_size 56 | prediction_image = Image.new("RGB", (im_Width, im_Height) ,(0,0,0)) 57 | prediction_imageDraw = ImageDraw.Draw(prediction_image) 58 | 59 | #BASE all image segmentation 60 | for i in range(im_Width): 61 | for j in range(im_Height): 62 | #get matrix element class(0-149) 63 | px_Class = self.predicted_classes[j][i] 64 | #assign color from .mat list 65 | put_Px_Color = tuple(self.class_colors['colors'][px_Class]) 66 | 67 | #drawing 68 | prediction_imageDraw.point((i,j), fill=put_Px_Color) 69 | 70 | #Resize 
to original size and save 71 | self.coef, self.h_pad, self.w_pad = self.calculateResize() 72 | FullHdOutImage = self.resizeToOutput(prediction_image, self.coef, self.h_pad, self.w_pad) 73 | FullHdOutImage = Image.blend(FullHdOutImage, self.im, 0.5) 74 | 75 | return FullHdOutImage 76 | -------------------------------------------------------------------------------- /tbot.py: -------------------------------------------------------------------------------- 1 | import time 2 | import telebot 3 | import requests 4 | 5 | TOKEN = "" 6 | KERAS_REST_API_URL = "" 7 | 8 | bot = telebot.TeleBot(TOKEN) 9 | 10 | 11 | @bot.message_handler(commands=['start']) 12 | def send_welcome(message): 13 | bot.reply_to(message, 'Hey Start') 14 | 15 | 16 | @bot.message_handler(commands=['info']) 17 | def send_welcome(message): 18 | info = ('Hey Info') 19 | bot.reply_to(message, info) 20 | 21 | 22 | import uuid 23 | 24 | @bot.message_handler(content_types=["photo"]) 25 | def answer_photo(message): 26 | photo = bot.get_file(message.photo[-1].file_id) 27 | # URL direction to image 28 | photo_url = "https://api.telegram.org/file/bot{0}/{1}".format( 29 | TOKEN, photo.file_path) 30 | # Computer Vision parameters 31 | r = requests.get(photo_url) 32 | file_name = str(uuid.uuid4()) + '.png' 33 | if r.status_code == 200: 34 | with open('temp/' + file_name, 'wb') as f: 35 | f.write(r.content) 36 | else: 37 | bot.reply_to(message, 'something fails...') 38 | return 39 | 40 | img = open('temp/' + file_name, 'rb') 41 | 42 | #img = open('inpred.png', 'rb') 43 | 44 | payload = {"image":img} 45 | 46 | bot.send_chat_action(message.chat.id, 'typing') 47 | try: 48 | r = requests.post(KERAS_REST_API_URL, files=payload).json() 49 | except: 50 | bot.reply_to(message, 'something fails....') 51 | print(r) 52 | time.sleep(1) 53 | 54 | img_path = None 55 | try: 56 | if r['success']: 57 | img_path = r['result_path'] 58 | img_result = open(img_path, 'rb') 59 | bot.reply_to(message, photo_url) 60 | bot.send_photo(message.chat.id, img_result, reply_to_message_id=message.message_id) 61 | 62 | img_path = r['mask_path'] 63 | img_result = open(img_path, 'rb') 64 | bot.reply_to(message, photo_url) 65 | bot.send_photo(message.chat.id, img_result, reply_to_message_id=message.message_id) 66 | 67 | img_path = r['cg_path'] 68 | img_result = open(img_path, 'rb') 69 | bot.reply_to(message, photo_url) 70 | bot.send_photo(message.chat.id, img_result, reply_to_message_id=message.message_id) 71 | else: 72 | bot.reply_to(message, 'something fails...') 73 | except: 74 | bot.reply_to(message, 'something fails...') 75 | 76 | @bot.message_handler(func=lambda m: True) 77 | def reply_all(message): 78 | if message.chat.type == "private": 79 | bot.reply_to(message, 'Please send me an image so I can describe it!') 80 | 81 | 82 | bot.polling(none_stop=True) 83 | 84 | 85 | while True: 86 | time.sleep(5) 87 | 88 | 89 | -------------------------------------------------------------------------------- /python_utils/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import colorsys 3 | import numpy as np 4 | from keras.models import Model 5 | from cityscapes_labels import trainId2label 6 | from ade20k_labels import ade20k_id2label 7 | from pascal_voc_labels import voc_id2label 8 | 9 | 10 | def class_image_to_image(class_id_image, class_id_to_rgb_map): 11 | """Map the class image to a rgb-color image.""" 12 | colored_image = np.zeros( 13 | (class_id_image.shape[0], class_id_image.shape[1], 3), 
np.uint8) 14 | for row in range(class_id_image.shape[0]): 15 | for col in range(class_id_image.shape[1]): 16 | try: 17 | colored_image[row, col, :] = class_id_to_rgb_map[ 18 | int(class_id_image[row, col])].color 19 | except KeyError as key_error: 20 | print("Warning: could not resolve classid %s" % key_error) 21 | return colored_image 22 | 23 | 24 | def color_class_image(class_image, model_name): 25 | """Color classed depending on the model used.""" 26 | if 'cityscapes' in model_name: 27 | colored_image = class_image_to_image(class_image, trainId2label) 28 | elif 'voc' in model_name: 29 | colored_image = class_image_to_image(class_image, voc_id2label) 30 | elif 'ade20k' in model_name: 31 | colored_image = class_image_to_image(class_image, ade20k_id2label) 32 | else: 33 | colored_image = add_color(class_image) 34 | return colored_image 35 | 36 | 37 | def add_color(img, num_classes=32): 38 | h, w = img.shape 39 | img_color = np.zeros((h, w, 3)) 40 | for i in range(1, 151): 41 | img_color[img == i] = to_color(i) 42 | img_color[img == num_classes] = (1.0, 1.0, 1.0) 43 | return img_color 44 | 45 | 46 | def to_color(category): 47 | """Map each category color a good distance away from each other on the HSV color space.""" 48 | v = (category - 1) * (137.5 / 360) 49 | return colorsys.hsv_to_rgb(v, 1, 1) 50 | 51 | 52 | def debug(model, data): 53 | """Debug model by printing the activations in each layer.""" 54 | names = [layer.name for layer in model.layers] 55 | for name in names[:]: 56 | print_activation(model, name, data) 57 | 58 | 59 | def print_activation(model, layer_name, data): 60 | """Print the activations in each layer.""" 61 | intermediate_layer_model = Model(inputs=model.input, 62 | outputs=model.get_layer(layer_name).output) 63 | io = intermediate_layer_model.predict(data) 64 | print(layer_name, array_to_str(io)) 65 | 66 | 67 | def array_to_str(a): 68 | return "{} {} {} {} {}".format(a.dtype, a.shape, np.min(a), 69 | np.max(a), np.mean(a)) 70 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/python_utils/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import colorsys 3 | import numpy as np 4 | from keras.models import Model 5 | from cityscapes_labels import trainId2label 6 | from ade20k_labels import ade20k_id2label 7 | from pascal_voc_labels import voc_id2label 8 | 9 | 10 | def class_image_to_image(class_id_image, class_id_to_rgb_map): 11 | """Map the class image to a rgb-color image.""" 12 | colored_image = np.zeros( 13 | (class_id_image.shape[0], class_id_image.shape[1], 3), np.uint8) 14 | for row in range(class_id_image.shape[0]): 15 | for col in range(class_id_image.shape[1]): 16 | try: 17 | colored_image[row, col, :] = class_id_to_rgb_map[ 18 | int(class_id_image[row, col])].color 19 | except KeyError as key_error: 20 | print("Warning: could not resolve classid %s" % key_error) 21 | return colored_image 22 | 23 | 24 | def color_class_image(class_image, model_name): 25 | """Color classed depending on the model used.""" 26 | if 'cityscapes' in model_name: 27 | colored_image = class_image_to_image(class_image, trainId2label) 28 | elif 'voc' in model_name: 29 | colored_image = class_image_to_image(class_image, voc_id2label) 30 | elif 'ade20k' in model_name: 31 | colored_image = class_image_to_image(class_image, ade20k_id2label) 32 | else: 33 | colored_image = add_color(class_image) 34 | return colored_image 35 | 36 | 37 | def 
add_color(img, num_classes=32): 38 | h, w = img.shape 39 | img_color = np.zeros((h, w, 3)) 40 | for i in range(1, 151): 41 | img_color[img == i] = to_color(i) 42 | img_color[img == num_classes] = (1.0, 1.0, 1.0) 43 | return img_color 44 | 45 | 46 | def to_color(category): 47 | """Map each category color a good distance away from each other on the HSV color space.""" 48 | v = (category - 1) * (137.5 / 360) 49 | return colorsys.hsv_to_rgb(v, 1, 1) 50 | 51 | 52 | def debug(model, data): 53 | """Debug model by printing the activations in each layer.""" 54 | names = [layer.name for layer in model.layers] 55 | for name in names[:]: 56 | print_activation(model, name, data) 57 | 58 | 59 | def print_activation(model, layer_name, data): 60 | """Print the activations in each layer.""" 61 | intermediate_layer_model = Model(inputs=model.input, 62 | outputs=model.get_layer(layer_name).output) 63 | io = intermediate_layer_model.predict(data) 64 | print(layer_name, array_to_str(io)) 65 | 66 | 67 | def array_to_str(a): 68 | return "{} {} {} {} {}".format(a.dtype, a.shape, np.min(a), 69 | np.max(a), np.mean(a)) 70 | -------------------------------------------------------------------------------- /process_all/test_align.py: -------------------------------------------------------------------------------- 1 | from align import forward_person_transform 2 | from align import inverse_person_transform 3 | from align import load_train_data, align_person 4 | 5 | import cv2 6 | import numpy as np 7 | 8 | IMAGES_PATH = 'images/' 9 | 10 | IMAGES = ['e1700.jpg', 'e1701.jpg', 'e1702.jpg', 'e1703.jpg', 'e1704.jpg'] 11 | MASKS = ['e1700_seg_read.png', 'e1701_seg_read.png', 'e1702_seg_read.png', 'e1703_seg_read.png', 'e1704_seg_read.png'] 12 | 13 | IMAGES_ALL = [IMAGES_PATH + i for i in IMAGES[1:]] 14 | MASKS_ALL = [IMAGES_PATH + i for i in MASKS[1:]] 15 | 16 | TEST_IMAGE = IMAGES_PATH + 'img_test.jpg' 17 | TEST_MASK = IMAGES_PATH + 'test_mask_voc_seg_read.jpg' 18 | 19 | def test_forward_transform(): 20 | image = cv2.imread(TEST_IMAGE) 21 | mask = cv2.imread(TEST_MASK, cv2.IMREAD_UNCHANGED) 22 | 23 | mask = np.array(mask > 0, np.uint8) 24 | 25 | person, person_mask = forward_person_transform(image, mask) 26 | 27 | cv2.imwrite('forward_transform_' + TEST_IMAGE.split('/')[-1], person) 28 | cv2.imwrite('forward_transform_' + TEST_MASK.split('/')[-1], person_mask * 255) 29 | 30 | def test_inverse_transform(): 31 | image = cv2.imread(TEST_IMAGE) 32 | mask = cv2.imread(TEST_MASK, cv2.IMREAD_UNCHANGED) 33 | 34 | mask = np.array(mask > 0, np.uint8) 35 | 36 | person, person_mask = forward_person_transform(image, mask) 37 | 38 | blended_image = inverse_person_transform(image, mask, person, person_mask) 39 | 40 | cv2.imwrite('inverse_person_transform_' + TEST_IMAGE.split('/')[-1], blended_image) 41 | 42 | 43 | def test_align_person(): 44 | image = cv2.imread(TEST_IMAGE) 45 | mask = cv2.imread(TEST_MASK, cv2.IMREAD_UNCHANGED) 46 | 47 | mask = np.array(mask > 0, np.uint8) 48 | 49 | person, person_mask = forward_person_transform(image, mask) 50 | train_images, train_masks = load_train_data(IMAGES_ALL, MASKS_ALL) 51 | 52 | aligned_image, aligned_mask = align_person(person, person_mask[..., None], train_images, train_masks) 53 | 54 | cv2.imwrite('align_transform_' + TEST_IMAGE.split('/')[-1], aligned_image) 55 | cv2.imwrite('align_transform_' + TEST_MASK.split('/')[-1], aligned_mask * 255) 56 | 57 | 58 | def test_align_person_partial(): 59 | image = cv2.imread(TEST_IMAGE) 60 | mask = cv2.imread(TEST_MASK, cv2.IMREAD_UNCHANGED) 61 | 62 | 
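    # binarize the mask and, below, zero out the lower rows to simulate a person who is only partially visible in the frame (the case this test exercises)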
mask = np.array(mask > 0, np.uint8) 63 | 64 | mask[150:] = 0 65 | 66 | person, person_mask = forward_person_transform(image, mask) 67 | train_images, train_masks = load_train_data(IMAGES_ALL, MASKS_ALL) 68 | 69 | aligned_image, aligned_mask = align_person(person, person_mask[..., None], train_images, train_masks) 70 | 71 | cv2.imwrite('align_transform_partial_' + TEST_IMAGE.split('/')[-1], aligned_image) 72 | cv2.imwrite('align_transform_partial_' + TEST_MASK.split('/')[-1], aligned_mask * 255) 73 | 74 | 75 | test_forward_transform() 76 | test_inverse_transform() 77 | test_align_person() 78 | test_align_person_partial() 79 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/kaffe/shapes.py: -------------------------------------------------------------------------------- 1 | import math 2 | from collections import namedtuple 3 | 4 | from .errors import KaffeError 5 | 6 | TensorShape = namedtuple('TensorShape', ['batch_size', 'channels', 'height', 'width']) 7 | 8 | 9 | def get_filter_output_shape(i_h, i_w, params, round_func): 10 | o_h = (i_h + 2 * params.pad_h - params.kernel_h) / float(params.stride_h) + 1 11 | o_w = (i_w + 2 * params.pad_w - params.kernel_w) / float(params.stride_w) + 1 12 | return (int(round_func(o_h)), int(round_func(o_w))) 13 | 14 | 15 | def get_strided_kernel_output_shape(node, round_func): 16 | assert node.layer is not None 17 | input_shape = node.get_only_parent().output_shape 18 | o_h, o_w = get_filter_output_shape(input_shape.height, input_shape.width, 19 | node.layer.kernel_parameters, round_func) 20 | params = node.layer.parameters 21 | has_c_o = hasattr(params, 'num_output') 22 | c = params.num_output if has_c_o else input_shape.channels 23 | return TensorShape(input_shape.batch_size, c, o_h, o_w) 24 | 25 | 26 | def shape_not_implemented(node): 27 | raise NotImplementedError 28 | 29 | 30 | def shape_identity(node): 31 | assert len(node.parents) > 0 32 | return node.parents[0].output_shape 33 | 34 | 35 | def shape_scalar(node): 36 | return TensorShape(1, 1, 1, 1) 37 | 38 | 39 | def shape_data(node): 40 | if node.output_shape: 41 | # Old-style input specification 42 | return node.output_shape 43 | try: 44 | # New-style input specification 45 | return map(int, node.parameters.shape[0].dim) 46 | except: 47 | # We most likely have a data layer on our hands. The problem is, 48 | # Caffe infers the dimensions of the data from the source (eg: LMDB). 49 | # We want to avoid reading datasets here. Fail for now. 50 | # This can be temporarily fixed by transforming the data layer to 51 | # Caffe's "input" layer (as is usually used in the "deploy" version). 52 | # TODO: Find a better solution for this. 
53 | raise KaffeError('Cannot determine dimensions of data layer.\n' 54 | 'See comments in function shape_data for more info.') 55 | 56 | 57 | def shape_mem_data(node): 58 | params = node.parameters 59 | return TensorShape(params.batch_size, params.channels, params.height, params.width) 60 | 61 | 62 | def shape_concat(node): 63 | axis = node.layer.parameters.axis 64 | output_shape = None 65 | for parent in node.parents: 66 | if output_shape is None: 67 | output_shape = list(parent.output_shape) 68 | else: 69 | output_shape[axis] += parent.output_shape[axis] 70 | return tuple(output_shape) 71 | 72 | 73 | def shape_convolution(node): 74 | return get_strided_kernel_output_shape(node, math.floor) 75 | 76 | 77 | def shape_pool(node): 78 | return get_strided_kernel_output_shape(node, math.ceil) 79 | 80 | 81 | def shape_inner_product(node): 82 | input_shape = node.get_only_parent().output_shape 83 | return TensorShape(input_shape.batch_size, node.layer.parameters.num_output, 1, 1) 84 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/examples/imagenet/classify.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import argparse 3 | import numpy as np 4 | import tensorflow as tf 5 | import os.path as osp 6 | 7 | import models 8 | import dataset 9 | 10 | 11 | def display_results(image_paths, probs): 12 | '''Displays the classification results given the class probability for each image''' 13 | # Get a list of ImageNet class labels 14 | with open('imagenet-classes.txt', 'rb') as infile: 15 | class_labels = map(str.strip, infile.readlines()) 16 | # Pick the class with the highest confidence for each image 17 | class_indices = np.argmax(probs, axis=1) 18 | # Display the results 19 | print('\n{:20} {:30} {}'.format('Image', 'Classified As', 'Confidence')) 20 | print('-' * 70) 21 | for img_idx, image_path in enumerate(image_paths): 22 | img_name = osp.basename(image_path) 23 | class_name = class_labels[class_indices[img_idx]] 24 | confidence = round(probs[img_idx, class_indices[img_idx]] * 100, 2) 25 | print('{:20} {:30} {} %'.format(img_name, class_name, confidence)) 26 | 27 | 28 | def classify(model_data_path, image_paths): 29 | '''Classify the given images using GoogleNet.''' 30 | 31 | # Get the data specifications for the GoogleNet model 32 | spec = models.get_data_spec(model_class=models.GoogleNet) 33 | 34 | # Create a placeholder for the input image 35 | input_node = tf.placeholder(tf.float32, 36 | shape=(None, spec.crop_size, spec.crop_size, spec.channels)) 37 | 38 | # Construct the network 39 | net = models.GoogleNet({'data': input_node}) 40 | 41 | # Create an image producer (loads and processes images in parallel) 42 | image_producer = dataset.ImageProducer(image_paths=image_paths, data_spec=spec) 43 | 44 | with tf.Session() as sesh: 45 | # Start the image processing workers 46 | coordinator = tf.train.Coordinator() 47 | threads = image_producer.start(session=sesh, coordinator=coordinator) 48 | 49 | # Load the converted parameters 50 | print('Loading the model') 51 | net.load(model_data_path, sesh) 52 | 53 | # Load the input image 54 | print('Loading the images') 55 | indices, input_images = image_producer.get(sesh) 56 | 57 | # Perform a forward pass through the network to get the class probabilities 58 | print('Classifying') 59 | probs = sesh.run(net.get_output(), feed_dict={input_node: input_images}) 60 | display_results([image_paths[i] for i in indices], 
probs) 61 | 62 | # Stop the worker threads 63 | coordinator.request_stop() 64 | coordinator.join(threads, stop_grace_period_secs=2) 65 | 66 | def main(): 67 | # Parse arguments 68 | parser = argparse.ArgumentParser() 69 | parser.add_argument('model_path', help='Converted parameters for the GoogleNet model') 70 | parser.add_argument('image_paths', nargs='+', help='One or more images to classify') 71 | args = parser.parse_args() 72 | 73 | # Classify the image 74 | classify(args.model_path, args.image_paths) 75 | 76 | 77 | if __name__ == '__main__': 78 | main() 79 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/README.md: -------------------------------------------------------------------------------- 1 | # Keras implementation of [PSPNet(caffe)](https://github.com/hszhao/PSPNet) 2 | 3 | Implemented Architecture of Pyramid Scene Parsing Network in Keras. 4 | 5 | ### Setup 6 | 1. Install dependencies: 7 | * Tensorflow (-gpu) 8 | * Keras 9 | * numpy 10 | * scipy 11 | * pycaffe(PSPNet)(optional for converting the weights) 12 | ```bash 13 | pip install -r requirements.txt --upgrade 14 | ``` 15 | 2. Converted trained weights are needed to run the network. 16 | Weights(in ```.h5 .json``` format) have to be downloaded and placed into directory ``` weights/keras ``` 17 | 18 | 19 | Already converted weights can be downloaded here: 20 | 21 | * [pspnet50_ade20k.h5](https://www.dropbox.com/s/0uxn14y26jcui4v/pspnet50_ade20k.h5?dl=1) 22 | [pspnet50_ade20k.json](https://www.dropbox.com/s/v41lvku2lx7lh6m/pspnet50_ade20k.json?dl=1) 23 | * [pspnet101_cityscapes.h5](https://www.dropbox.com/s/c17g94n946tpalb/pspnet101_cityscapes.h5?dl=1) 24 | [pspnet101_cityscapes.json](https://www.dropbox.com/s/fswowe8e3o14tdm/pspnet101_cityscapes.json?dl=1) 25 | * [pspnet101_voc2012.h5](https://www.dropbox.com/s/uvqj2cjo4b9c5wg/pspnet101_voc2012.h5?dl=1) 26 | [pspnet101_voc2012.json](https://www.dropbox.com/s/rr5taqu19f5fuzy/pspnet101_voc2012.json?dl=1) 27 | 28 | Running this needs the compiled original PSPNet caffe code and pycaffe. 29 | 30 | ```bash 31 | python weight_converter.py 32 | ``` 33 | 34 | ## Usage: 35 | 36 | ```bash 37 | python pspnet.py -m -i -o 38 | python pspnet.py -m pspnet101_cityscapes -i example_images/cityscapes.png -o example_results/cityscapes.jpg 39 | python pspnet.py -m pspnet101_voc2012 -i example_images/pascal_voc.jpg -o example_results/pascal_voc.jpg 40 | ``` 41 | List of arguments: 42 | ```bash 43 | -m --model - which model to use: 'pspnet50_ade20k', 'pspnet101_cityscapes', 'pspnet101_voc2012' 44 | --id - (int) GPU Device id. 
Default 0 45 | -s --sliding - Use sliding window 46 | -f --flip - Additional prediction of flipped image 47 | -ms --multi_scale - Predict on multiscale images 48 | ``` 49 | ## Keras results: 50 | ![Original](example_images/ade20k.jpg) 51 | ![New](example_results/ade20k_seg.jpg) 52 | ![New](example_results/ade20k_seg_blended.jpg) 53 | ![New](example_results/ade20k_probs.jpg) 54 | 55 | ![Original](example_images/cityscapes.png) 56 | ![New](example_results/cityscapes_seg.jpg) 57 | ![New](example_results/cityscapes_seg_blended.jpg) 58 | ![New](example_results/cityscapes_probs.jpg) 59 | 60 | ![Original](example_images/pascal_voc.jpg) 61 | ![New](example_results/pascal_voc_seg.jpg) 62 | ![New](example_results/pascal_voc_seg_blended.jpg) 63 | ![New](example_results/pascal_voc_probs.jpg) 64 | 65 | 66 | ## Implementation details 67 | * The interpolation layer is implemented as custom layer "Interp" 68 | * Forward step takes about ~1 sec on single image 69 | * Memory usage can be optimized with: 70 | ```python 71 | config = tf.ConfigProto() 72 | config.gpu_options.per_process_gpu_memory_fraction = 0.3 73 | sess = tf.Session(config=config) 74 | ``` 75 | * ```ndimage.zoom``` can take a long time 76 | 77 | 78 | 79 | 80 | 81 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/examples/imagenet/models/helper.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os.path as osp 3 | import numpy as np 4 | 5 | # Add the kaffe module to the import path 6 | sys.path.append(osp.realpath(osp.join(osp.dirname(__file__), '../../../'))) 7 | 8 | from googlenet import GoogleNet 9 | from vgg import VGG16 10 | from alexnet import AlexNet 11 | from caffenet import CaffeNet 12 | from nin import NiN 13 | from resnet import ResNet50, ResNet101, ResNet152 14 | 15 | 16 | class DataSpec(object): 17 | '''Input data specifications for an ImageNet model.''' 18 | 19 | def __init__(self, 20 | batch_size, 21 | scale_size, 22 | crop_size, 23 | isotropic, 24 | channels=3, 25 | mean=None, 26 | bgr=True): 27 | # The recommended batch size for this model 28 | self.batch_size = batch_size 29 | # The image should be scaled to this size first during preprocessing 30 | self.scale_size = scale_size 31 | # Whether the model expects the rescaling to be isotropic 32 | self.isotropic = isotropic 33 | # A square crop of this dimension is expected by this model 34 | self.crop_size = crop_size 35 | # The number of channels in the input image expected by this model 36 | self.channels = channels 37 | # The mean to be subtracted from each image. By default, the per-channel ImageNet mean. 38 | # The values below are ordered BGR, as many Caffe models are trained in this order. 39 | # Some of the earlier models (like AlexNet) used a spatial three-channeled mean. 40 | # However, using just the per-channel mean values instead doesn't affect things too much. 
41 | self.mean = mean if mean is not None else np.array([104., 117., 124.]) 42 | # Whether this model expects images to be in BGR order 43 | self.expects_bgr = True 44 | 45 | 46 | def alexnet_spec(batch_size=500): 47 | '''Parameters used by AlexNet and its variants.''' 48 | return DataSpec(batch_size=batch_size, scale_size=256, crop_size=227, isotropic=False) 49 | 50 | 51 | def std_spec(batch_size, isotropic=True): 52 | '''Parameters commonly used by "post-AlexNet" architectures.''' 53 | return DataSpec(batch_size=batch_size, scale_size=256, crop_size=224, isotropic=isotropic) 54 | 55 | # Collection of sample auto-generated models 56 | MODELS = (AlexNet, CaffeNet, GoogleNet, NiN, ResNet50, ResNet101, ResNet152, VGG16) 57 | 58 | # The corresponding data specifications for the sample models 59 | # These specifications are based on how the models were trained. 60 | # The recommended batch size is based on a Titan X (12GB). 61 | MODEL_DATA_SPECS = { 62 | AlexNet: alexnet_spec(), 63 | CaffeNet: alexnet_spec(), 64 | GoogleNet: std_spec(batch_size=200, isotropic=False), 65 | ResNet50: std_spec(batch_size=25), 66 | ResNet101: std_spec(batch_size=25), 67 | ResNet152: std_spec(batch_size=25), 68 | NiN: std_spec(batch_size=500), 69 | VGG16: std_spec(batch_size=25) 70 | } 71 | 72 | 73 | def get_models(): 74 | '''Returns a tuple of sample models.''' 75 | return MODELS 76 | 77 | 78 | def get_data_spec(model_instance=None, model_class=None): 79 | '''Returns the data specifications for the given network.''' 80 | model_class = model_class or model_instance.__class__ 81 | return MODEL_DATA_SPECS[model_class] 82 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/README.md: -------------------------------------------------------------------------------- 1 | # Caffe to TensorFlow 2 | 3 | Convert [Caffe](https://github.com/BVLC/caffe/) models to [TensorFlow](https://github.com/tensorflow/tensorflow). 4 | 5 | ## Usage 6 | 7 | Run `convert.py` to convert an existing Caffe model to TensorFlow. 8 | 9 | Make sure you're using the latest Caffe format (see the notes section for more info). 10 | 11 | The output consists of two files: 12 | 13 | 1. A data file (in NumPy's native format) containing the model's learned parameters. 14 | 2. A Python class that constructs the model's graph. 15 | 16 | ### Examples 17 | 18 | See the [examples](examples/) folder for more details. 19 | 20 | ## Verification 21 | 22 | The following converted models have been verified on the ILSVRC2012 validation set using 23 | [validate.py](examples/imagenet/validate.py). 24 | 25 | | Model | Top 5 Accuracy | 26 | |:------------------------------------------------------|---------------:| 27 | | [ResNet 152](http://arxiv.org/abs/1512.03385) | 92.92% | 28 | | [ResNet 101](http://arxiv.org/abs/1512.03385) | 92.63% | 29 | | [ResNet 50](http://arxiv.org/abs/1512.03385) | 92.02% | 30 | | [VGG 16](http://arxiv.org/abs/1409.1556) | 89.88% | 31 | | [GoogLeNet](http://arxiv.org/abs/1409.4842) | 89.06% | 32 | | [Network in Network](http://arxiv.org/abs/1312.4400) | 81.21% | 33 | | [CaffeNet](http://arxiv.org/abs/1408.5093) | 79.93% | 34 | | [AlexNet](http://goo.gl/3BilWd) | 79.84% | 35 | 36 | ## Notes 37 | 38 | - Only the new Caffe model format is supported. If you have an old model, use the `upgrade_net_proto_text` and `upgrade_net_proto_binary` tools that ship with Caffe to upgrade them first. Also make sure you're using a fairly recent version of Caffe. 
39 | 40 | - It appears that Caffe and TensorFlow cannot be concurrently invoked (CUDA conflicts - even with `set_mode_cpu`). This makes it a two-stage process: first extract the parameters with `convert.py`, then import it into TensorFlow. 41 | 42 | - Caffe is not strictly required. If PyCaffe is found in your `PYTHONPATH`, and the `USE_PYCAFFE` environment variable is set, it will be used. Otherwise, a fallback will be used. However, the fallback uses the pure Python-based implementation of protobuf, which is astoundingly slow (~1.5 minutes to parse the VGG16 parameters). The experimental CPP protobuf backend doesn't particularly help here, since it runs into the file size limit (Caffe gets around this by overriding this limit in C++). A cleaner solution here would be to implement the loader as a C++ module. 43 | 44 | - Only a subset of Caffe layers and accompanying parameters are currently supported. 45 | 46 | - Not all Caffe models can be converted to TensorFlow. For instance, Caffe supports arbitrary padding whereas TensorFlow's support is currently restricted to `SAME` and `VALID`. 47 | 48 | - The border values are handled differently by Caffe and TensorFlow. However, these don't appear to affect things too much. 49 | 50 | - Image rescaling can affect the ILSVRC2012 top 5 accuracy listed above slightly. VGG16 expects isotropic rescaling (anisotropic reduces accuracy to 88.45%) whereas BVLC's implementation of GoogLeNet expects anisotropic (isotropic reduces accuracy to 87.7%). 51 | 52 | - The support class `kaffe.tensorflow.Network` has no internal dependencies. It can be safely extracted and deployed without the rest of this library. 53 | 54 | - The ResNet model uses 1x1 convolutions with a stride of 2. This is currently only supported in the master branch of TensorFlow (the latest release at time of writing being v0.8.0, which does not support it). 
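As a quick reference, the "import into TensorFlow" stage described in the notes above looks roughly like the sketch below. It follows the pattern used in `examples/imagenet/classify.py`; `MyNet` and `mynet.npy` are hypothetical stand-ins for whatever class and data file `convert.py` generated for your model, and the 224x224 input size is only an example that must be replaced by whatever the original Caffe model expects.

```python
import numpy as np
import tensorflow as tf
from mynet import MyNet  # hypothetical module generated by convert.py

# Placeholder for a batch of images in the size the converted model expects
images = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))

# Build the graph from the generated class
net = MyNet({'data': images})

with tf.Session() as sess:
    # Load the converted parameters (the .npy data file produced by convert.py)
    net.load('mynet.npy', sess)
    # Run a forward pass on a dummy batch
    batch = np.zeros((1, 224, 224, 3), dtype=np.float32)
    probs = sess.run(net.get_output(), feed_dict={images: batch})
    print(probs.shape)
```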
55 | -------------------------------------------------------------------------------- /CycleGAN-tensorflow/main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import tensorflow as tf 4 | tf.set_random_seed(19) 5 | from model import cyclegan 6 | 7 | parser = argparse.ArgumentParser(description='') 8 | parser.add_argument('--dataset_dir', dest='dataset_dir', default='horse2zebra', help='path of the dataset') 9 | parser.add_argument('--epoch', dest='epoch', type=int, default=200, help='# of epoch') 10 | parser.add_argument('--epoch_step', dest='epoch_step', type=int, default=100, help='# of epoch to decay lr') 11 | parser.add_argument('--batch_size', dest='batch_size', type=int, default=1, help='# images in batch') 12 | parser.add_argument('--train_size', dest='train_size', type=int, default=1e8, help='# images used to train') 13 | parser.add_argument('--load_size', dest='load_size', type=int, default=286, help='scale images to this size') 14 | parser.add_argument('--fine_size', dest='fine_size', type=int, default=256, help='then crop to this size') 15 | parser.add_argument('--ngf', dest='ngf', type=int, default=64, help='# of gen filters in first conv layer') 16 | parser.add_argument('--ndf', dest='ndf', type=int, default=64, help='# of discri filters in first conv layer') 17 | parser.add_argument('--input_nc', dest='input_nc', type=int, default=3, help='# of input image channels') 18 | parser.add_argument('--output_nc', dest='output_nc', type=int, default=3, help='# of output image channels') 19 | parser.add_argument('--lr', dest='lr', type=float, default=0.0002, help='initial learning rate for adam') 20 | parser.add_argument('--beta1', dest='beta1', type=float, default=0.5, help='momentum term of adam') 21 | parser.add_argument('--which_direction', dest='which_direction', default='AtoB', help='AtoB or BtoA') 22 | parser.add_argument('--phase', dest='phase', default='train', help='train, test') 23 | parser.add_argument('--save_freq', dest='save_freq', type=int, default=1000, help='save a model every save_freq iterations') 24 | parser.add_argument('--print_freq', dest='print_freq', type=int, default=100, help='print the debug information every print_freq iterations') 25 | parser.add_argument('--continue_train', dest='continue_train', type=bool, default=False, help='if continue training, load the latest model: 1: true, 0: false') 26 | parser.add_argument('--checkpoint_dir', dest='checkpoint_dir', default='./checkpoint', help='models are saved here') 27 | parser.add_argument('--sample_dir', dest='sample_dir', default='./sample', help='sample are saved here') 28 | parser.add_argument('--test_dir', dest='test_dir', default='./test', help='test sample are saved here') 29 | parser.add_argument('--L1_lambda', dest='L1_lambda', type=float, default=10.0, help='weight on L1 term in objective') 30 | parser.add_argument('--use_resnet', dest='use_resnet', type=bool, default=True, help='generation network using reidule block') 31 | parser.add_argument('--use_lsgan', dest='use_lsgan', type=bool, default=True, help='gan loss defined in lsgan') 32 | parser.add_argument('--max_size', dest='max_size', type=int, default=50, help='max size of image pool, 0 means do not use image pool') 33 | 34 | args = parser.parse_args() 35 | 36 | 37 | def main(_): 38 | if not os.path.exists(args.checkpoint_dir): 39 | os.makedirs(args.checkpoint_dir) 40 | if not os.path.exists(args.sample_dir): 41 | os.makedirs(args.sample_dir) 42 | if not 
os.path.exists(args.test_dir): 43 | os.makedirs(args.test_dir) 44 | 45 | tfconfig = tf.ConfigProto(allow_soft_placement=True) 46 | tfconfig.gpu_options.allow_growth = True 47 | with tf.Session(config=tfconfig) as sess: 48 | model = cyclegan(sess, args) 49 | model.train(args) if args.phase == 'train' \ 50 | else model.test(args) 51 | 52 | if __name__ == '__main__': 53 | tf.app.run() 54 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/caffe-tensorflow/examples/imagenet/validate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | '''Validates a converted ImageNet model against the ILSVRC12 validation set.''' 3 | 4 | import argparse 5 | import numpy as np 6 | import tensorflow as tf 7 | import os.path as osp 8 | 9 | import models 10 | import dataset 11 | 12 | 13 | def load_model(name): 14 | '''Creates and returns an instance of the model given its class name. 15 | The created model has a single placeholder node for feeding images. 16 | ''' 17 | # Find the model class from its name 18 | all_models = models.get_models() 19 | lut = {model.__name__: model for model in all_models} 20 | if name not in lut: 21 | print('Invalid model index. Options are:') 22 | # Display a list of valid model names 23 | for model in all_models: 24 | print('\t* {}'.format(model.__name__)) 25 | return None 26 | NetClass = lut[name] 27 | 28 | # Create a placeholder for the input image 29 | spec = models.get_data_spec(model_class=NetClass) 30 | data_node = tf.placeholder(tf.float32, 31 | shape=(None, spec.crop_size, spec.crop_size, spec.channels)) 32 | 33 | # Construct and return the model 34 | return NetClass({'data': data_node}) 35 | 36 | 37 | def validate(net, model_path, image_producer, top_k=5): 38 | '''Compute the top_k classification accuracy for the given network and images.''' 39 | # Get the data specifications for given network 40 | spec = models.get_data_spec(model_instance=net) 41 | # Get the input node for feeding in the images 42 | input_node = net.inputs['data'] 43 | # Create a placeholder for the ground truth labels 44 | label_node = tf.placeholder(tf.int32) 45 | # Get the output of the network (class probabilities) 46 | probs = net.get_output() 47 | # Create a top_k accuracy node 48 | top_k_op = tf.nn.in_top_k(probs, label_node, top_k) 49 | # The number of images processed 50 | count = 0 51 | # The number of correctly classified images 52 | correct = 0 53 | # The total number of images 54 | total = len(image_producer) 55 | 56 | with tf.Session() as sesh: 57 | coordinator = tf.train.Coordinator() 58 | # Load the converted parameters 59 | net.load(data_path=model_path, session=sesh) 60 | # Start the image processing workers 61 | threads = image_producer.start(session=sesh, coordinator=coordinator) 62 | # Iterate over and classify mini-batches 63 | for (labels, images) in image_producer.batches(sesh): 64 | correct += np.sum(sesh.run(top_k_op, 65 | feed_dict={input_node: images, 66 | label_node: labels})) 67 | count += len(labels) 68 | cur_accuracy = float(correct) * 100 / count 69 | print('{:>6}/{:<6} {:>6.2f}%'.format(count, total, cur_accuracy)) 70 | # Stop the worker threads 71 | coordinator.request_stop() 72 | coordinator.join(threads, stop_grace_period_secs=2) 73 | print('Top {} Accuracy: {}'.format(top_k, float(correct) / total)) 74 | 75 | 76 | 77 | def main(): 78 | # Parse arguments 79 | parser = argparse.ArgumentParser() 80 | parser.add_argument('model_path', help='Path to the 
converted model parameters (.npy)') 81 | parser.add_argument('val_gt', help='Path to validation set ground truth (.txt)') 82 | parser.add_argument('imagenet_data_dir', help='ImageNet validation set images directory path') 83 | parser.add_argument('--model', default='GoogleNet', help='The name of the model to evaluate') 84 | args = parser.parse_args() 85 | 86 | # Load the network 87 | net = load_model(args.model) 88 | if net is None: 89 | exit(-1) 90 | 91 | # Load the dataset 92 | data_spec = models.get_data_spec(model_instance=net) 93 | image_producer = dataset.ImageNetProducer(val_path=args.val_gt, 94 | data_path=args.imagenet_data_dir, 95 | data_spec=data_spec) 96 | 97 | # Evaluate its performance on the ILSVRC12 validation set 98 | validate(net, args.model_path, image_producer) 99 | 100 | 101 | if __name__ == '__main__': 102 | main() 103 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | from os import path 2 | from os.path import join 3 | from scipy.misc import imresize 4 | from python_utils.preprocessing import data_generator_s31 5 | from python_utils.callbacks import callbacks 6 | from keras.models import load_model 7 | import layers_builder as layers 8 | import numpy as np 9 | import argparse 10 | import os 11 | 12 | 13 | def set_npy_weights(weights_path, model): 14 | npy_weights_path = join("weights", "npy", weights_path + ".npy") 15 | json_path = join("weights", "keras", weights_path + ".json") 16 | h5_path = join("weights", "keras", weights_path + ".h5") 17 | 18 | print("Importing weights from %s" % npy_weights_path) 19 | weights = np.load(npy_weights_path).item() 20 | 21 | for layer in model.layers: 22 | print(layer.name) 23 | if layer.name[:4] == 'conv' and layer.name[-2:] == 'bn': 24 | mean = weights[layer.name]['mean'].reshape(-1) 25 | variance = weights[layer.name]['variance'].reshape(-1) 26 | scale = weights[layer.name]['scale'].reshape(-1) 27 | offset = weights[layer.name]['offset'].reshape(-1) 28 | 29 | self.model.get_layer(layer.name).set_weights( 30 | [scale, offset, mean, variance]) 31 | 32 | elif layer.name[:4] == 'conv' and not layer.name[-4:] == 'relu': 33 | try: 34 | weight = weights[layer.name]['weights'] 35 | model.get_layer(layer.name).set_weights([weight]) 36 | except Exception as err: 37 | try: 38 | biases = weights[layer.name]['biases'] 39 | model.get_layer(layer.name).set_weights([weight, 40 | biases]) 41 | except Exception as err2: 42 | print(err2) 43 | 44 | if layer.name == 'activation_52': 45 | break 46 | 47 | 48 | def train(datadir, logdir, input_size, nb_classes, resnet_layers, batchsize, weights, initial_epoch, pre_trained, sep): 49 | if args.weights: 50 | model = load_model(weights) 51 | else: 52 | model = layers.build_pspnet(nb_classes=nb_classes, 53 | resnet_layers=resnet_layers, 54 | input_shape=input_size) 55 | set_npy_weights(pre_trained, model) 56 | dataset_len = len(os.listdir(os.path.join(datadir, 'imgs'))) 57 | train_generator, val_generator = data_generator_s31( 58 | datadir=datadir, batch_size=batchsize, input_size=input_size, nb_classes=nb_classes, separator=sep) 59 | model.fit_generator( 60 | generator=train_generator, 61 | epochs=100000, verbose=True, steps_per_epoch=500, 62 | callbacks=callbacks(logdir), initial_epoch=initial_epoch) 63 | 64 | 65 | class PSPNet(object): 66 | """Pyramid Scene Parsing Network by Hengshuang Zhao et al 2017""" 67 | 68 | def __init__(self, nb_classes, resnet_layers, input_shape): 69 
| self.input_shape = input_shape 70 | self.model = layers.build_pspnet(nb_classes=nb_classes, 71 | layers=resnet_layers, 72 | input_shape=self.input_shape) 73 | print("Load pre-trained weights") 74 | self.model.load_weights("weights/keras/pspnet101_voc2012.h5") 75 | 76 | if __name__ == '__main__': 77 | parser = argparse.ArgumentParser() 78 | parser.add_argument('--input_dim', type=int, default=473) 79 | parser.add_argument('--classes', type=int, default=2) 80 | parser.add_argument('--resnet_layers', type=int, default=50) 81 | parser.add_argument('--batch', type=int, default=2) 82 | parser.add_argument('--datadir', type=str, required=True) 83 | parser.add_argument('--logdir', type=str) 84 | parser.add_argument('--weights', type=str, default=None) 85 | parser.add_argument('--initial_epoch', type=int, default=0) 86 | parser.add_argument('-m', '--model', type=str, default='pspnet50_ade20k', 87 | help='Model/Weights to use', 88 | choices=['pspnet50_ade20k', 89 | 'pspnet101_cityscapes', 90 | 'pspnet101_voc2012']) 91 | parser.add_argument('--gpu', type=int, default=0) 92 | parser.add_argument('--sep', default=').') 93 | args = parser.parse_args() 94 | 95 | os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) 96 | 97 | train(args.datadir, args.logdir, (640, 480), args.classes, args.resnet_layers, 98 | args.batch, args.weights, args.initial_epoch, args.model, args.sep) 99 | -------------------------------------------------------------------------------- /PSPNet-Keras-tensorflow/train.py: -------------------------------------------------------------------------------- 1 | from os import path 2 | from os.path import join 3 | from scipy.misc import imresize 4 | from python_utils.preprocessing import data_generator_s31 5 | from python_utils.callbacks import callbacks 6 | from keras.models import load_model 7 | import layers_builder as layers 8 | import numpy as np 9 | import argparse 10 | import os 11 | 12 | 13 | def set_npy_weights(weights_path, model): 14 | npy_weights_path = join("weights", "npy", weights_path + ".npy") 15 | json_path = join("weights", "keras", weights_path + ".json") 16 | h5_path = join("weights", "keras", weights_path + ".h5") 17 | 18 | print("Importing weights from %s" % npy_weights_path) 19 | weights = np.load(npy_weights_path).item() 20 | 21 | for layer in model.layers: 22 | print(layer.name) 23 | if layer.name[:4] == 'conv' and layer.name[-2:] == 'bn': 24 | mean = weights[layer.name]['mean'].reshape(-1) 25 | variance = weights[layer.name]['variance'].reshape(-1) 26 | scale = weights[layer.name]['scale'].reshape(-1) 27 | offset = weights[layer.name]['offset'].reshape(-1) 28 | 29 | self.model.get_layer(layer.name).set_weights( 30 | [scale, offset, mean, variance]) 31 | 32 | elif layer.name[:4] == 'conv' and not layer.name[-4:] == 'relu': 33 | try: 34 | weight = weights[layer.name]['weights'] 35 | model.get_layer(layer.name).set_weights([weight]) 36 | except Exception as err: 37 | try: 38 | biases = weights[layer.name]['biases'] 39 | model.get_layer(layer.name).set_weights([weight, 40 | biases]) 41 | except Exception as err2: 42 | print(err2) 43 | 44 | if layer.name == 'activation_52': 45 | break 46 | 47 | 48 | def train(datadir, logdir, input_size, nb_classes, resnet_layers, batchsize, weights, initial_epoch, pre_trained, sep): 49 | if args.weights: 50 | model = load_model(weights) 51 | else: 52 | model = layers.build_pspnet(nb_classes=nb_classes, 53 | resnet_layers=resnet_layers, 54 | input_shape=input_size) 55 | set_npy_weights(pre_trained, model) 56 | dataset_len = 
len(os.listdir(os.path.join(datadir, 'imgs'))) 57 | train_generator, val_generator = data_generator_s31( 58 | datadir=datadir, batch_size=batchsize, input_size=input_size, nb_classes=nb_classes, separator=sep) 59 | model.fit_generator( 60 | generator=train_generator, 61 | epochs=100000, verbose=True, steps_per_epoch=500, 62 | callbacks=callbacks(logdir), initial_epoch=initial_epoch) 63 | 64 | 65 | class PSPNet(object): 66 | """Pyramid Scene Parsing Network by Hengshuang Zhao et al 2017""" 67 | 68 | def __init__(self, nb_classes, resnet_layers, input_shape): 69 | self.input_shape = input_shape 70 | self.model = layers.build_pspnet(nb_classes=nb_classes, 71 | layers=resnet_layers, 72 | input_shape=self.input_shape) 73 | print("Load pre-trained weights") 74 | self.model.load_weights("weights/keras/pspnet101_voc2012.h5") 75 | 76 | if __name__ == '__main__': 77 | parser = argparse.ArgumentParser() 78 | parser.add_argument('--input_dim', type=int, default=473) 79 | parser.add_argument('--classes', type=int, default=2) 80 | parser.add_argument('--resnet_layers', type=int, default=50) 81 | parser.add_argument('--batch', type=int, default=2) 82 | parser.add_argument('--datadir', type=str, required=True) 83 | parser.add_argument('--logdir', type=str) 84 | parser.add_argument('--weights', type=str, default=None) 85 | parser.add_argument('--initial_epoch', type=int, default=0) 86 | parser.add_argument('-m', '--model', type=str, default='pspnet50_ade20k', 87 | help='Model/Weights to use', 88 | choices=['pspnet50_ade20k', 89 | 'pspnet101_cityscapes', 90 | 'pspnet101_voc2012']) 91 | parser.add_argument('--gpu', type=int, default=0) 92 | parser.add_argument('--sep', default=').') 93 | args = parser.parse_args() 94 | 95 | os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) 96 | 97 | train(args.datadir, args.logdir, (640, 480), args.classes, args.resnet_layers, 98 | args.batch, args.weights, args.initial_epoch, args.model, args.sep) 99 | -------------------------------------------------------------------------------- /process_image.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | MEAN_WIDTH 4 | HEAD_ZERO_X 5 | HEAD_ZERO_Y 6 | 7 | W_INCREASE 8 | 9 | MODEL_INPUT_SIZE 10 | MASK_SIZE 11 | 12 | MORPH = 5 13 | ALPHA = 0.5 14 | 15 | CUM_REDUCE = 0.1 16 | CUM_INCREASE = 10 17 | 18 | # predictions blending methods 19 | def masked_images_blending(img1, img1_mask, img2, img2_mask, alpha): 20 | mask_intersection = img1_mask * img2_mask 21 | return img1 * (img1_mask - mask_intersection) \ 22 | + img2 * (img2_mask - mask_intersection) \ 23 | + alpha * img1 * mask_intersection \ 24 | + (1 - alpha) * img2 * mask_intersection 25 | 26 | 27 | def blend_background(initial_image, prediction, mask, morph_param, alpha): 28 | background_image = np.zeros_like(initial_image) 29 | 30 | kernel = np.ones(morph_param, np.uint8) 31 | background_mask = cv2.erode(mask, kernel, iterations = 1) 32 | background_mask = 1 - background_mask 33 | 34 | return masked_images_blending(background_image, background_mask, prediction, mask, alpha) 35 | 36 | 37 | def get_image_mask(image): 38 | return pspnet.get_mask(image) 39 | 40 | def get_person_prediction(image): 41 | return cyclenet.get_prediction(image) 42 | 43 | def forward_person_transform(image, mask): 44 | x, y, w, h = cv2.boudnigBox(mask) 45 | 46 | cummask = np.cumsum(mask.sum(axis=1)) 47 | total_sum = cummask[-1] 48 | cummask[cummask < total_sum * CUM_REDUCE] = 0 49 | cummask[cummask > tutal_sum * (1 - CUM_REDUCE)] = 0 50 | cummask = cummask > 0 
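    # cummask now keeps only the band whose cumulative mask mass lies between CUM_REDUCE and (1 - CUM_REDUCE) of the total, so stray segmentation pixels near the edges do not inflate the measured person extent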
51 | 52 | masked = np.where(cummask)[0] 53 | masked_l = max(0, masked.min() - CUM_INCREASE) 54 | masked_r = min(image.shape[1], masked.max() + CUM_INCREASE) 55 | 56 | w = masked_r - masked_l 57 | fx = MEAN_WIDTH / w 58 | 59 | person = cv2.resize(image[x:x+w, y:y+h], fx=fx) 60 | person_mask = cv2.resize(mask[x:x+w, y:y+h], fx=fx, interpolation=cv2.INTER_NEAREST) 61 | 62 | return person, person_mask 63 | 64 | 65 | def inverse_person_transform(image, image_mask, prediction, prediction_mask): 66 | ix, iy, iw, ih = cv2.boudnigBox(image_mask) 67 | px, py, pw, ph = cv2.boudnigBox(prediction_mask) 68 | 69 | prediction = np.zeros_like(image) 70 | prediction[ix:ix+iw, iy:iy+ih] = cv2.resize(prediction[px:px+pw, py:py+ph], (iw, ih)) 71 | 72 | return blend_background(image, prediction, image_mask, MORPH, ALPHA) 73 | 74 | 75 | def inpaint_image(image, mask, y): 76 | mx, my, mw, mh = cv2.boudnigBox(mask) 77 | 78 | from_x = max(0, mx - W_INCREASE) 79 | to_x = min(image.shape[1], mx + W_INCREASE) 80 | to_y = min(my, y) 81 | 82 | inpainting_mask = np.zeros((to_y, image.shape[1])) 83 | inpainting_mask[:to_y, from_x:to_x] = 1 84 | 85 | result = image.copy() 86 | result[:to_y] = cv2.inpaint(image[:to_y], inpainting_mask, 3, cv2.INPAINT_TELEA) 87 | 88 | return result 89 | 90 | 91 | # put image into fake background and/or add parts from best fit person if we have partial image 92 | def align_person(image, mask, train_images, train_masks): 93 | partial_image = np.zeros(MODEL_INPUT_SIZE) 94 | partial_fitted_mask = np.zeros(MASK_SIZE) 95 | 96 | partial_image[HEAD_Y:HEAD_Y + image.shape[0], HEAD_X:HEAD_X + MEAN_WIDTH] = image * mask 97 | partial_mask[HEAD_Y:HEAD_Y + image.shape[0], HEAD_X:HEAD_X + MEAN_WIDTH] = mask 98 | 99 | # get best base image 100 | masks_intersection = np.sum(train_masks[:, :HEAD_Y + image.shape[0]] * partial_mask, axis=0) 101 | masks_union = np.sum((train_masks[:, :HEAD_Y + image.shape[0]] + partial_mask) > 0, axis=0) 102 | best_fit = np.argmax(masks_intersection / masks_union) 103 | 104 | base_image = inpaint_image(train_images[best_fit], train_masks[best_fit], HEAD_Y + image.shape[0]) 105 | 106 | return partial_image + (1 - partial_mask) * base_image 107 | 108 | 109 | def process_image(image, train_images, train_masks): 110 | # extract person mask using pspnet 111 | mask = get_image_mask(image) 112 | 113 | # get only area around person 114 | person_image, person_mask = forward_person_transform(image, mask) 115 | 116 | # prepare image to put into generative model 117 | person_image_aligned, person_mask_aligned = align_person(person_image, person_mask, train_images, train_masks) 118 | 119 | # get prediction from unboxing model 120 | prediction = get_person_prediction(person_image_aligned) 121 | 122 | # put prediction on old background and blend 123 | resulted_image = inverse_person_transform(image, mask, prediction, person_mask_aligned) 124 | 125 | return resulted_image 126 | 127 | -------------------------------------------------------------------------------- /process_all/process_image.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | MEAN_WIDTH 4 | HEAD_ZERO_X 5 | HEAD_ZERO_Y 6 | 7 | W_INCREASE 8 | 9 | MODEL_INPUT_SIZE 10 | MASK_SIZE 11 | 12 | MORPH = 5 13 | ALPHA = 0.5 14 | 15 | CUM_REDUCE = 0.1 16 | CUM_INCREASE = 10 17 | 18 | # predictions blending methods 19 | def masked_images_blending(img1, img1_mask, img2, img2_mask, alpha): 20 | mask_intersection = img1_mask * img2_mask 21 | return img1 * (img1_mask - mask_intersection) \ 22 | + img2 * 
(img2_mask - mask_intersection) \ 23 | + alpha * img1 * mask_intersection \ 24 | + (1 - alpha) * img2 * mask_intersection 25 | 26 | 27 | def blend_background(initial_image, prediction, mask, morph_param, alpha): 28 | background_image = np.zeros_like(initial_image) 29 | 30 | kernel = np.ones(morph_param, np.uint8) 31 | background_mask = cv2.erode(mask, kernel, iterations = 1) 32 | background_mask = 1 - background_mask 33 | 34 | return masked_images_blending(background_image, background_mask, prediction, mask, alpha) 35 | 36 | 37 | def get_image_mask(image): 38 | return pspnet.get_mask(image) 39 | 40 | def get_person_prediction(image): 41 | return cyclenet.get_prediction(image) 42 | 43 | def forward_person_transform(image, mask): 44 | x, y, w, h = cv2.boudnigBox(mask) 45 | 46 | cummask = np.cumsum(mask.sum(axis=1)) 47 | total_sum = cummask[-1] 48 | cummask[cummask < total_sum * CUM_REDUCE] = 0 49 | cummask[cummask > tutal_sum * (1 - CUM_REDUCE)] = 0 50 | cummask = cummask > 0 51 | 52 | masked = np.where(cummask)[0] 53 | masked_l = max(0, masked.min() - CUM_INCREASE) 54 | masked_r = min(image.shape[1], masked.max() + CUM_INCREASE) 55 | 56 | w = masked_r - masked_l 57 | fx = MEAN_WIDTH / w 58 | 59 | person = cv2.resize(image[x:x+w, y:y+h], fx=fx) 60 | person_mask = cv2.resize(mask[x:x+w, y:y+h], fx=fx, interpolation=cv2.INTER_NEAREST) 61 | 62 | return person, person_mask 63 | 64 | 65 | def inverse_person_transform(image, image_mask, prediction, prediction_mask): 66 | ix, iy, iw, ih = cv2.boudnigBox(image_mask) 67 | px, py, pw, ph = cv2.boudnigBox(prediction_mask) 68 | 69 | prediction = np.zeros_like(image) 70 | prediction[ix:ix+iw, iy:iy+ih] = cv2.resize(prediction[px:px+pw, py:py+ph], (iw, ih)) 71 | 72 | return blend_background(image, prediction, image_mask, MORPH, ALPHA) 73 | 74 | 75 | def inpaint_image(image, mask, y): 76 | mx, my, mw, mh = cv2.boudnigBox(mask) 77 | 78 | from_x = max(0, mx - W_INCREASE) 79 | to_x = min(image.shape[1], mx + W_INCREASE) 80 | to_y = min(my, y) 81 | 82 | inpainting_mask = np.zeros((to_y, image.shape[1])) 83 | inpainting_mask[:to_y, from_x:to_x] = 1 84 | 85 | result = image.copy() 86 | result[:to_y] = cv2.inpaint(image[:to_y], inpainting_mask, 3, cv2.INPAINT_TELEA) 87 | 88 | return result 89 | 90 | 91 | # put image into fake background and/or add parts from best fit person if we have partial image 92 | def align_person(image, mask, train_images, train_masks): 93 | partial_image = np.zeros(MODEL_INPUT_SIZE) 94 | partial_fitted_mask = np.zeros(MASK_SIZE) 95 | 96 | partial_image[HEAD_Y:HEAD_Y + image.shape[0], HEAD_X:HEAD_X + MEAN_WIDTH] = image * mask 97 | partial_mask[HEAD_Y:HEAD_Y + image.shape[0], HEAD_X:HEAD_X + MEAN_WIDTH] = mask 98 | 99 | # get best base image 100 | masks_intersection = np.sum(train_masks[:, :HEAD_Y + image.shape[0]] * partial_mask, axis=0) 101 | masks_union = np.sum((train_masks[:, :HEAD_Y + image.shape[0]] + partial_mask) > 0, axis=0) 102 | best_fit = np.argmax(masks_intersection / masks_union) 103 | 104 | base_image = inpaint_image(train_images[best_fit], train_masks[best_fit], HEAD_Y + image.shape[0]) 105 | 106 | return partial_image + (1 - partial_mask) * base_image 107 | 108 | 109 | def process_image(image, train_images, train_masks): 110 | # extract person mask using pspnet 111 | mask = get_image_mask(image) 112 | 113 | # get only area around person 114 | person_image, person_mask = forward_person_transform(image, mask) 115 | 116 | # prepare image to put into generative model 117 | person_image_aligned, person_mask_aligned = 
align_person(person_image, person_mask, train_images, train_masks) 118 | 119 | # get prediction from unboxing model 120 | prediction = get_person_prediction(person_image_aligned) 121 | 122 | # put prediction on old background and blend 123 | resulted_image = inverse_person_transform(image, mask, prediction, person_mask_aligned) 124 | 125 | return resulted_image 126 | 127 | -------------------------------------------------------------------------------- /process_all/align.py: -------------------------------------------------------------------------------- 1 | from params import * 2 | from align import * 3 | 4 | import numpy as np 5 | import cv2 6 | 7 | 8 | # predictions blending methods 9 | def masked_images_blending(img1, img1_mask, img2, img2_mask, alpha): 10 | mask_intersection = img1_mask * img2_mask 11 | 12 | return img1 * (img1_mask - mask_intersection) \ 13 | + img2 * (img2_mask - mask_intersection) \ 14 | + alpha * img1 * mask_intersection \ 15 | + (1 - alpha) * img2 * mask_intersection 16 | 17 | 18 | def blend_background(initial_image, prediction, mask, morph_param, alpha): 19 | background_image = initial_image.copy() 20 | 21 | kernel = np.ones(morph_param, np.uint8) 22 | background_mask = cv2.erode(mask, kernel, iterations = 1) 23 | background_mask = 1 - background_mask 24 | 25 | return masked_images_blending(background_image, background_mask[..., None], prediction, mask, alpha) 26 | 27 | 28 | def forward_person_transform(image, mask): 29 | x, y, w, h = cv2.boundingRect(mask) 30 | 31 | mask_cumsum = np.cumsum(mask.sum(axis=0)) 32 | total_sum = mask_cumsum[-1] 33 | cummask = np.zeros_like(mask_cumsum) 34 | cummask[mask_cumsum < total_sum * (1 - CUM_REDUCE)] = 1 35 | cummask[mask_cumsum < total_sum * CUM_REDUCE] = 0 36 | cummask = cummask > 0 37 | 38 | masked = np.where(cummask)[0] 39 | masked_l = max(0, masked.min() - CUM_INCREASE * w) 40 | masked_r = min(image.shape[1], masked.max() + CUM_INCREASE * w) 41 | 42 | cum_w = (masked_r - masked_l) 43 | print(cum_w / h) 44 | if cum_w / h > 0.3: 45 | fx = HEAD_W / cum_w 46 | else: 47 | fx = MEAN_WIDTH / cum_w 48 | 49 | person = cv2.resize(image[y:y+h, x:x+w], None, fx=fx, fy=fx) 50 | person_mask = cv2.resize(mask[y:y+h, x:x+w], None, fx=fx, fy=fx, interpolation=cv2.INTER_NEAREST) 51 | 52 | person_y = person.shape[0] 53 | if person_y > IMAGE_H - 2 * HEAD_Y: 54 | fy = (IMAGE_H - 2 * HEAD_Y) / person_y 55 | person = cv2.resize(person, None, fx=fy, fy=fy) 56 | person_mask = cv2.resize(person_mask, None, fx=fy, fy=fy, interpolation=cv2.INTER_NEAREST) 57 | 58 | return person, person_mask 59 | 60 | 61 | def inverse_person_transform(image, image_mask, prediction, prediction_mask): 62 | ix, iy, iw, ih = cv2.boundingRect(image_mask) 63 | px, py, pw, ph = cv2.boundingRect(prediction_mask) 64 | 65 | prediction_inverse = np.zeros_like(image) 66 | prediction_inverse[iy:iy+ih, ix:ix+iw] = cv2.resize(prediction[py:py+ph, px:px+pw], (iw, ih)) 67 | 68 | return blend_background(image, prediction_inverse, image_mask[..., None], MORPH, ALPHA) 69 | 70 | 71 | def load_train_data(images_paths, masks_paths): 72 | assert len(images_paths) == len(masks_paths), "wrong size, images length should be equal to masks length" 73 | 74 | train_images = np.zeros((len(images_paths), IMAGE_H, IMAGE_W, 3), np.uint8) 75 | train_masks = np.zeros((len(images_paths), IMAGE_H, IMAGE_W, 1), np.uint8) 76 | 77 | for i in range(len(images_paths)): 78 | train_images[i] = cv2.imread(images_paths[i]) 79 | train_masks[i] = cv2.imread(masks_paths[i], cv2.IMREAD_UNCHANGED)[..., None] > 0 80 | 
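    # train_images holds the raw BGR frames; train_masks holds the matching binary (0/1) segmentation masks with a trailing channel axis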
81 | return train_images, train_masks 82 | 83 | 84 | def inpaint_image(image, mask, y): 85 | mx, my, mw, mh = cv2.boundingRect(mask) 86 | 87 | from_x = max(0, mx - W_INCREASE) 88 | to_x = min(image.shape[1], mx + mw + W_INCREASE) 89 | to_y = min(my + mh, y) 90 | 91 | inpaint_y = to_y 92 | if inpaint_y < 180: 93 | inpaint_y -= INPAINT_INCREASE 94 | 95 | print(to_y, inpaint_y) 96 | 97 | inpainting_mask = np.zeros((inpaint_y, image.shape[1]), np.uint8) 98 | inpainting_mask[:inpaint_y, from_x:to_x] = 1 99 | 100 | result = image.copy() 101 | result[:inpaint_y] = cv2.inpaint(image[:inpaint_y], inpainting_mask, 3, cv2.INPAINT_TELEA) 102 | 103 | return result 104 | 105 | 106 | # put image into fake background and/or add parts from best fit person if we have partial image 107 | def align_person(image, mask, train_images, train_masks): 108 | partial_image = np.zeros(MODEL_INPUT_SIZE) 109 | partial_mask = np.zeros(MASK_SIZE) 110 | 111 | head_w = image.shape[1] 112 | head_x = max(0, (IMAGE_W - head_w) // 2) 113 | 114 | partial_image[HEAD_Y:HEAD_Y + image.shape[0], head_x:head_x + head_w] = image * mask 115 | partial_mask[HEAD_Y:HEAD_Y + image.shape[0], head_x:head_x + head_w] = mask 116 | 117 | # get best base image 118 | masks_intersection = np.sum(train_masks * partial_mask, axis=(1,2,3)) 119 | masks_union = np.sum((train_masks + partial_mask) > 0, axis=(1,2,3)) 120 | best_fit = np.argmax(masks_intersection / masks_union) 121 | 122 | base_image = inpaint_image(train_images[best_fit], train_masks[best_fit][..., 0].astype(np.uint8), HEAD_Y + image.shape[0]) 123 | 124 | blended_aligned = blend_background(base_image, partial_image, partial_mask, MORPH, ALPHA) 125 | return blended_aligned, partial_mask 126 | 127 | 128 | -------------------------------------------------------------------------------- /utilsc.py: -------------------------------------------------------------------------------- 1 | """ 2 | Some codes from https://github.com/Newmu/dcgan_code 3 | """ 4 | from __future__ import division 5 | import math 6 | import pprint 7 | import scipy.misc 8 | import numpy as np 9 | import copy 10 | try: 11 | _imread = scipy.misc.imread 12 | except AttributeError: 13 | from imageio import imread as _imread 14 | 15 | import cv2 16 | 17 | pp = pprint.PrettyPrinter() 18 | 19 | get_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1]) 20 | 21 | # ----------------------------- 22 | # new added functions for cyclegan 23 | class ImagePool(object): 24 | def __init__(self, maxsize=50): 25 | self.maxsize = maxsize 26 | self.num_img = 0 27 | self.images = [] 28 | 29 | def __call__(self, image): 30 | if self.maxsize <= 0: 31 | return image 32 | if self.num_img < self.maxsize: 33 | self.images.append(image) 34 | self.num_img += 1 35 | return image 36 | if np.random.rand() > 0.5: 37 | idx = int(np.random.rand()*self.maxsize) 38 | tmp1 = copy.copy(self.images[idx])[0] 39 | self.images[idx][0] = image[0] 40 | idx = int(np.random.rand()*self.maxsize) 41 | tmp2 = copy.copy(self.images[idx])[1] 42 | self.images[idx][1] = image[1] 43 | return [tmp1, tmp2] 44 | else: 45 | return image 46 | 47 | def load_test_data(image_path, fine_size=256): 48 | img = imread(image_path) 49 | img = scipy.misc.imresize(img, [fine_size, fine_size]) 50 | img = img/127.5 - 1 51 | return img 52 | 53 | def load_train_data(image_path, load_size=286, fine_size=256, is_testing=False): 54 | img_A = imread(image_path[0]) 55 | img_B = imread(image_path[1]) 56 | if not is_testing: 57 | img_A = scipy.misc.imresize(img_A, [load_size, load_size]) 58 
| img_B = scipy.misc.imresize(img_B, [load_size, load_size]) 59 | h1 = int(np.ceil(np.random.uniform(1e-2, load_size-fine_size))) 60 | w1 = int(np.ceil(np.random.uniform(1e-2, load_size-fine_size))) 61 | img_A = img_A[h1:h1+fine_size, w1:w1+fine_size] 62 | img_B = img_B[h1:h1+fine_size, w1:w1+fine_size] 63 | 64 | if np.random.random() > 0.5: 65 | img_A = np.fliplr(img_A) 66 | img_B = np.fliplr(img_B) 67 | else: 68 | img_A = scipy.misc.imresize(img_A, [fine_size, fine_size]) 69 | img_B = scipy.misc.imresize(img_B, [fine_size, fine_size]) 70 | 71 | img_A = img_A/127.5 - 1. 72 | img_B = img_B/127.5 - 1. 73 | 74 | img_AB = np.concatenate((img_A, img_B), axis=2) 75 | # img_AB shape: (fine_size, fine_size, input_c_dim + output_c_dim) 76 | return img_AB 77 | 78 | # ----------------------------- 79 | 80 | def get_image(image_path, image_size, is_crop=True, resize_w=64, is_grayscale = False): 81 | return transform(imread(image_path, is_grayscale), image_size, is_crop, resize_w) 82 | 83 | def save_images(images, size, image_path): 84 | return imsave(inverse_transform(images), size, image_path) 85 | 86 | def imread(path, is_grayscale = False): 87 | if (is_grayscale): 88 | return _imread(path, flatten=True).astype(np.float) 89 | else: 90 | image = cv2.imread(path, cv2.IMREAD_UNCHANGED) 91 | image[..., :3] = cv2.cvtColor(image[..., :3], cv2.COLOR_BGR2RGB) 92 | 93 | if image.shape[2] == 4: 94 | image_fill = np.ones((image.shape[0], image.shape[1], 3)) * 255 95 | image_new = image[..., :3] * image[..., 3:] + image_fill * (1 - image[..., 3:]) 96 | cv2.imwrite('img.png', cv2.cvtColor(image_new.astype(np.uint8), cv2.COLOR_RGB2BGR)) 97 | return image_new 98 | #_imread(path, mode='RGB').astype(np.float) 99 | return image.astype(np.float32) 100 | 101 | def merge_images(images, size): 102 | return inverse_transform(images) 103 | 104 | def merge(images, size): 105 | h, w = images.shape[1], images.shape[2] 106 | img = np.zeros((h * size[0], w * size[1], 3)) 107 | for idx, image in enumerate(images): 108 | i = idx % size[1] 109 | j = idx // size[1] 110 | img[j*h:j*h+h, i*w:i*w+w, :] = image 111 | 112 | return img 113 | 114 | def imsave(images, size, path): 115 | return scipy.misc.imsave(path, merge(images, size)) 116 | 117 | def center_crop(x, crop_h, crop_w, 118 | resize_h=64, resize_w=64): 119 | if crop_w is None: 120 | crop_w = crop_h 121 | h, w = x.shape[:2] 122 | j = int(round((h - crop_h)/2.)) 123 | i = int(round((w - crop_w)/2.)) 124 | return scipy.misc.imresize( 125 | x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w]) 126 | 127 | def transform(image, npx=64, is_crop=True, resize_w=64): 128 | # npx : # of pixels width/height of image 129 | if is_crop: 130 | cropped_image = center_crop(image, npx, resize_w=resize_w) 131 | else: 132 | cropped_image = image 133 | return np.array(cropped_image)/127.5 - 1. 134 | 135 | def inverse_transform(images): 136 | return (images+1.)/2. 
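# Illustrative sketch (not part of the original file): ImagePool above implements the
# generated-image history buffer from the CycleGAN paper. Each call stores the newest
# pair of generator outputs and, with probability 0.5 once the pool is full, hands back
# previously generated fakes instead, which is what the discriminators should be trained
# on to stabilise training. The random arrays below are stand-ins for generator outputs.
def _image_pool_demo(num_steps=100):
    pool = ImagePool(maxsize=50)
    for _ in range(num_steps):
        # stand-ins for the two generator outputs of one training step
        fake_A = np.random.rand(1, 256, 256, 3)
        fake_B = np.random.rand(1, 256, 256, 3)
        # the pool may return an older pair; train the discriminators on what it returns
        fake_A_hist, fake_B_hist = pool([fake_A, fake_B])
    return fake_A_hist, fake_B_hist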
--------------------------------------------------------------------------------
/CycleGAN-tensorflow/utilsc.py:
--------------------------------------------------------------------------------
"""
Some code from https://github.com/Newmu/dcgan_code
"""
from __future__ import division
import math
import pprint
import scipy.misc
import numpy as np
import copy
try:
    _imread = scipy.misc.imread
except AttributeError:
    from imageio import imread as _imread

import cv2

pp = pprint.PrettyPrinter()

get_stddev = lambda x, k_h, k_w: 1 / math.sqrt(k_w * k_h * x.get_shape()[-1])

# -----------------------------
# functions added for CycleGAN
class ImagePool(object):
    """History buffer of generated image pairs, as used for the CycleGAN discriminators."""
    def __init__(self, maxsize=50):
        self.maxsize = maxsize
        self.num_img = 0
        self.images = []

    def __call__(self, image):
        # `image` is a pair [fake_A, fake_B]; once the pool is full, with
        # probability 0.5 an older pair is returned and the new one stored
        if self.maxsize <= 0:
            return image
        if self.num_img < self.maxsize:
            self.images.append(image)
            self.num_img += 1
            return image
        if np.random.rand() > 0.5:
            idx = int(np.random.rand() * self.maxsize)
            tmp1 = copy.copy(self.images[idx])[0]
            self.images[idx][0] = image[0]
            idx = int(np.random.rand() * self.maxsize)
            tmp2 = copy.copy(self.images[idx])[1]
            self.images[idx][1] = image[1]
            return [tmp1, tmp2]
        else:
            return image

def load_test_data(image_path, fine_size=256):
    img = imread(image_path)
    img = scipy.misc.imresize(img, [fine_size, fine_size])
    img = img / 127.5 - 1
    return img

def load_train_data(image_path, load_size=286, fine_size=256, is_testing=False):
    img_A = imread(image_path[0])
    img_B = imread(image_path[1])
    if not is_testing:
        # resize to load_size, then take the same random fine_size crop from both images
        img_A = scipy.misc.imresize(img_A, [load_size, load_size])
        img_B = scipy.misc.imresize(img_B, [load_size, load_size])
        h1 = int(np.ceil(np.random.uniform(1e-2, load_size - fine_size)))
        w1 = int(np.ceil(np.random.uniform(1e-2, load_size - fine_size)))
        img_A = img_A[h1:h1 + fine_size, w1:w1 + fine_size]
        img_B = img_B[h1:h1 + fine_size, w1:w1 + fine_size]

        if np.random.random() > 0.5:
            img_A = np.fliplr(img_A)
            img_B = np.fliplr(img_B)
    else:
        img_A = scipy.misc.imresize(img_A, [fine_size, fine_size])
        img_B = scipy.misc.imresize(img_B, [fine_size, fine_size])

    img_A = img_A / 127.5 - 1.
    img_B = img_B / 127.5 - 1.

    img_AB = np.concatenate((img_A, img_B), axis=2)
    # img_AB shape: (fine_size, fine_size, input_c_dim + output_c_dim)
    return img_AB

# -----------------------------

def get_image(image_path, image_size, is_crop=True, resize_w=64, is_grayscale=False):
    return transform(imread(image_path, is_grayscale), image_size, is_crop, resize_w)

def save_images(images, size, image_path):
    return imsave(inverse_transform(images), size, image_path)

def imread(path, is_grayscale=False):
    if is_grayscale:
        return _imread(path, flatten=True).astype(float)
    else:
        image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        image[..., :3] = cv2.cvtColor(image[..., :3], cv2.COLOR_BGR2RGB)

        if image.shape[2] == 4:
            # composite RGBA onto a white background; the alpha channel is
            # assumed to hold 0/1 values, and the result is also written to
            # img.png for inspection
            image_fill = np.ones((image.shape[0], image.shape[1], 3)) * 255
            image_new = image[..., :3] * image[..., 3:] + image_fill * (1 - image[..., 3:])
            cv2.imwrite('img.png', cv2.cvtColor(image_new.astype(np.uint8), cv2.COLOR_RGB2BGR))
            return image_new
        # _imread(path, mode='RGB').astype(np.float)
        return image.astype(np.float32)

def merge_images(images, size):
    return inverse_transform(images)

def merge(images, size):
    h, w = images.shape[1], images.shape[2]
    img = np.zeros((h * size[0], w * size[1], 3))
    for idx, image in enumerate(images):
        i = idx % size[1]
        j = idx // size[1]
        img[j * h:j * h + h, i * w:i * w + w, :] = image

    return img

def imsave(images, size, path):
    return scipy.misc.imsave(path, merge(images, size))

def center_crop(x, crop_h, crop_w=None,
                resize_h=64, resize_w=64):
    if crop_w is None:
        crop_w = crop_h
    h, w = x.shape[:2]
    j = int(round((h - crop_h) / 2.))
    i = int(round((w - crop_w) / 2.))
    return scipy.misc.imresize(
        x[j:j + crop_h, i:i + crop_w], [resize_h, resize_w])

def transform(image, npx=64, is_crop=True, resize_w=64):
    # npx: width/height of the image in pixels
    if is_crop:
        cropped_image = center_crop(image, npx, resize_w=resize_w)
    else:
        cropped_image = image
    return np.array(cropped_image) / 127.5 - 1.

def inverse_transform(images):
    return (images + 1.) / 2.
--------------------------------------------------------------------------------
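
For context, here is a small, self-contained usage sketch for the data helpers defined above. It is not taken from any file in this repository; the dummy file names are placeholders, and it assumes an environment with the older SciPy pinned by the project, where scipy.misc.imresize and scipy.misc.imsave still exist.

import numpy as np
import cv2
from utilsc import load_train_data, save_images  # assumes the module above is importable

# write two dummy RGB images to disk so the loader has something to read
cv2.imwrite('dummy_A.png', np.random.randint(0, 255, (300, 300, 3), dtype=np.uint8))
cv2.imwrite('dummy_B.png', np.random.randint(0, 255, (300, 300, 3), dtype=np.uint8))

# each image is resized to 286x286, the same random 256x256 crop is taken from
# both, pixel values are scaled to [-1, 1], and A and B are stacked channel-wise
img_AB = load_train_data(['dummy_A.png', 'dummy_B.png'], load_size=286, fine_size=256)
print(img_AB.shape)  # (256, 256, 6)

# save_images expects a batch in [-1, 1] and a grid size [rows, cols]
batch = img_AB[np.newaxis, ..., :3]
save_images(batch, [1, 1], 'dummy_grid.png')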