├── Dockerfile
├── Dockerfile.cpu
├── README.md
├── docker-compose.yml
├── gradcam.py
├── guidedBackprop.py
├── main.py
├── main.ui
├── maps.py
├── networks.py
├── sample_images
│   ├── cat_dog.png
│   ├── cat_dog_cam_resnet_conv1_0.png
│   ├── cat_dog_cam_resnet_conv1_1.png
│   ├── cat_dog_cam_resnet_conv2_0.png
│   ├── cat_dog_cam_resnet_conv2_1.png
│   ├── cat_dog_cam_vgg_conv1_0.png
│   ├── cat_dog_cam_vgg_conv1_1.png
│   ├── cat_dog_cam_vgg_conv2_0.png
│   ├── cat_dog_cam_vgg_conv2_1.png
│   └── out_optimized.gif
├── streamReader.py
├── timed.py
├── ui.py
├── ui_main.py
└── utils.py
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:latest-gpu-py3
2 |
3 | RUN apt-get update \
4 | && apt-get install -y --no-install-recommends \
5 | build-essential \
6 | curl \
7 | software-properties-common \
8 | python3-pip \
9 | && add-apt-repository -y ppa:jonathonf/python-3.6 \
10 | && apt-get update \
11 | && apt-get install -y python3.6 python3.6-dev \
12 | && apt-get clean \
13 | && rm -rf /var/lib/apt/lists/*
14 |
15 | # pip has to be installed before setuptools, setuptools has to be installed before tensorflow
16 | RUN python3.6 -m pip install --no-cache-dir -U pip
17 | RUN python3.6 -m pip install --no-cache-dir -U setuptools
18 | # also useful
19 | RUN python3.6 -m pip install --no-cache-dir ipython requests numpy pandas quandl
20 | RUN python3.6 -m pip install --no-cache-dir tensorflow-gpu==1.11.0
21 |
22 | # Tensorflow should be fine by here
23 |
24 | RUN python3.6-config --includes \
25 | && cd /usr/bin && rm python3 && ln -s python3.6 python3
26 |
27 |
RUN apt-get -y update -qq && \
    apt-get -y install wget \
                       unzip \
                       # Required
                       build-essential \
                       cmake \
                       git \
                       pkg-config \
                       libatlas-base-dev \
                       libgtk2.0-dev \
                       libavcodec-dev \
                       libavformat-dev \
                       libswscale-dev \
                       # Optional
                       libtbb2 libtbb-dev \
                       libjpeg-dev \
                       libpng-dev \
                       libtiff-dev \
                       libv4l-dev \
                       libdc1394-22-dev \
                       qt4-default \
                       # Missing libraries for GTK
                       libatk-adaptor \
                       libcanberra-gtk-module \
                       # Tools
                       imagemagick \
                       # For using matplotlib.pyplot in Python
                       python3-tk \
                       python-tk
63 |
64 | WORKDIR /
65 | # Get OpenCV
66 | RUN git clone https://github.com/opencv/opencv.git
67 | ENV OPENCV_VERSION=3.4.3
68 | RUN cd opencv &&\
69 | git checkout $OPENCV_VERSION &&\
70 | cd / &&\
71 | # Get OpenCV contrib modules
72 | git clone https://github.com/opencv/opencv_contrib &&\
73 | cd opencv_contrib &&\
74 | git checkout $OPENCV_VERSION &&\
75 | mkdir /opencv/build
76 |
77 | RUN cd /opencv/build &&\
    # Let's build OpenCV
79 | cmake \
80 | -D CMAKE_BUILD_TYPE=RELEASE \
81 | -D CMAKE_INSTALL_PREFIX=/usr/local \
82 | -D INSTALL_C_EXAMPLES=OFF \
83 | -D INSTALL_PYTHON_EXAMPLES=OFF \
84 | -D OPENCV_EXTRA_MODULES_PATH=/opencv_contrib/modules \
85 | -D BUILD_EXAMPLES=OFF \
86 | -D BUILD_NEW_PYTHON_SUPPORT=ON \
87 | -D BUILD_DOCS=OFF \
88 | -D BUILD_TESTS=OFF \
89 | -D BUILD_PERF_TESTS=OFF \
90 | -D WITH_TBB=ON \
91 | -D WITH_OPENMP=ON \
92 | -D WITH_IPP=ON \
93 | -D WITH_CSTRIPES=ON \
94 | -D WITH_OPENCL=ON \
95 | -D WITH_V4L=ON \
96 | -D BUILD_opencv_python3=ON \
97 | .. &&\
    # NUM_CORES is never defined; fall back to the machine's core count
    make -j"$(nproc)" &&\
99 | make install &&\
100 | ldconfig &&\
101 | # Clean the install from sources
102 | cd / &&\
103 | rm -r /opencv &&\
104 | rm -r /opencv_contrib
105 |
106 | RUN apt-get update && apt-get -y install libgtk-3-dev libboost-all-dev
107 | RUN pip3 install PyQt5
108 | WORKDIR /src
109 | CMD ["python3", "main.py"]
110 |
--------------------------------------------------------------------------------
/Dockerfile.cpu:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:latest-py3
2 |
3 | RUN apt-get update \
4 | && apt-get install -y --no-install-recommends \
5 | build-essential \
6 | curl \
7 | software-properties-common \
8 | python3-pip \
9 | && add-apt-repository -y ppa:jonathonf/python-3.6 \
10 | && apt-get update \
11 | && apt-get install -y python3.6 python3.6-dev \
12 | && apt-get clean \
13 | && rm -rf /var/lib/apt/lists/*
14 |
15 | # pip has to be installed before setuptools, setuptools has to be installed before tensorflow
16 | RUN python3.6 -m pip install --no-cache-dir -U pip
17 | RUN python3.6 -m pip install --no-cache-dir -U setuptools
18 | # also useful
19 | RUN python3.6 -m pip install --no-cache-dir ipython requests numpy pandas quandl
20 | RUN python3.6 -m pip install --no-cache-dir tensorflow==1.11.0
21 |
22 | # Tensorflow should be fine by here
23 |
24 | RUN python3.6-config --includes \
25 | && cd /usr/bin && rm python3 && ln -s python3.6 python3
26 |
27 |
RUN apt-get -y update -qq && \
    apt-get -y install wget \
                       unzip \
                       # Required
                       build-essential \
                       cmake \
                       git \
                       pkg-config \
                       libatlas-base-dev \
                       libgtk2.0-dev \
                       libavcodec-dev \
                       libavformat-dev \
                       libswscale-dev \
                       # Optional
                       libtbb2 libtbb-dev \
                       libjpeg-dev \
                       libpng-dev \
                       libtiff-dev \
                       libv4l-dev \
                       libdc1394-22-dev \
                       qt4-default \
                       # Missing libraries for GTK
                       libatk-adaptor \
                       libcanberra-gtk-module \
                       # Tools
                       imagemagick \
                       # For using matplotlib.pyplot in Python
                       python3-tk \
                       python-tk
63 |
64 | WORKDIR /
65 | # Get OpenCV
66 | RUN git clone https://github.com/opencv/opencv.git
67 | ENV OPENCV_VERSION=3.4.3
68 | RUN cd opencv &&\
69 | git checkout $OPENCV_VERSION &&\
70 | cd / &&\
71 | # Get OpenCV contrib modules
72 | git clone https://github.com/opencv/opencv_contrib &&\
73 | cd opencv_contrib &&\
74 | git checkout $OPENCV_VERSION &&\
75 | mkdir /opencv/build
76 |
77 | RUN cd /opencv/build &&\
    # Let's build OpenCV
79 | cmake \
80 | -D CMAKE_BUILD_TYPE=RELEASE \
81 | -D CMAKE_INSTALL_PREFIX=/usr/local \
82 | -D INSTALL_C_EXAMPLES=OFF \
83 | -D INSTALL_PYTHON_EXAMPLES=OFF \
84 | -D OPENCV_EXTRA_MODULES_PATH=/opencv_contrib/modules \
85 | -D BUILD_EXAMPLES=OFF \
86 | -D BUILD_NEW_PYTHON_SUPPORT=ON \
87 | -D BUILD_DOCS=OFF \
88 | -D BUILD_TESTS=OFF \
89 | -D BUILD_PERF_TESTS=OFF \
90 | -D WITH_TBB=ON \
91 | -D WITH_OPENMP=ON \
92 | -D WITH_IPP=ON \
93 | -D WITH_CSTRIPES=ON \
94 | -D WITH_OPENCL=ON \
95 | -D WITH_V4L=ON \
96 | -D BUILD_opencv_python3=ON \
97 | .. &&\
    # NUM_CORES is never defined; fall back to the machine's core count
    make -j"$(nproc)" &&\
99 | make install &&\
100 | ldconfig &&\
101 | # Clean the install from sources
102 | cd / &&\
103 | rm -r /opencv &&\
104 | rm -r /opencv_contrib
105 |
106 | RUN apt-get update && apt-get -y install libgtk-3-dev libboost-all-dev
107 | RUN pip3 install PyQt5
108 | WORKDIR /src
109 | CMD ["python3", "main.py"]
110 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Real Time CNN Visualization
2 |
3 | This is a platform for real time visualization of Convolutional Neural Networks.
4 |
The aim of the platform is to be a handy tool for quick, interactive analysis of networks.
6 |
Activation maps of convolutional layers, as well as activations of fully connected layers, are visualized. Visualized activations can be clicked interactively to apply more advanced visualization techniques to the corresponding neurons.
8 |
The visualization runs at roughly 0.4x the FPS of the visualized network itself; for example, ResNet50 is visualized at ~40 FPS on a GTX 1080 Ti. This is achieved by building a single graph for all the visualizations, so that, given an input frame, all the visualizations required at a certain moment are obtained on the GPU in a single pass through the graph, without extra back-and-forth data transfers between host and GPU.
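A minimal sketch of the single-pass idea (TF1-style, as in `main.py`; the two toy tensors below are illustrative stand-ins for the real grid/backprop/CAM tensors):

```
import numpy as np
import tensorflow as tf

ph = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
# Two "visualizations" sharing the same input graph
vis_a = tf.reduce_mean(ph, axis=-1)
vis_b = tf.nn.relu(ph - tf.reduce_mean(ph))

sess = tf.Session()
frame = np.zeros((224, 224, 3), np.float32)
# A single sess.run evaluates the shared graph once and returns all requested
# visualizations together, avoiding per-visualization GPU round trips
fetched = sess.run({"a": vis_a, "b": vis_b}, feed_dict={ph: [frame]})
```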
10 |
11 |
12 |
13 |
14 | ## Requirements
15 |
16 | * [Docker](https://docs.docker.com/install/linux/docker-ce/ubuntu/)
17 |
It is recommended to run on GPU, as with the CPU version the FPS will be very low. To run on GPU, the following is additionally required.
19 |
20 | * Recent NVIDIA drivers (`nvidia-384` on Ubuntu)
21 | * [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker )
22 |
23 |
24 | ## Usage
25 | ### GPU version
26 | ```
27 | docker build -t basecv . # Build Docker image which contains all the requirements
28 | docker run --runtime nvidia --env DISPLAY=$DISPLAY -v="/tmp/.X11-unix:/tmp/.X11-unix:rw" -v=$(pwd)/.keras:/root/.keras -v="$(pwd)/..:$(pwd)/.." -w=$(pwd) -it basecv python3 main.py --stream "your/stream/uri"
29 | ```
30 |
31 | ### CPU version
32 |
33 | ```
34 | docker build -t basecv -f Dockerfile.cpu . # Build Docker image which contains all the requirements
35 | docker run --env DISPLAY=$DISPLAY -v="/tmp/.X11-unix:/tmp/.X11-unix:rw" -v=$(pwd)/.keras:/root/.keras -v="$(pwd)/..:$(pwd)/.." -w=$(pwd) -it basecv python3 main.py --stream "your/stream/uri"
36 | ```
37 |
38 | ### Arguments
39 |
40 | `python3 main.py -h # Gives information on available parameters`
41 |
42 | ```
43 | usage: main.py [-h] [--stream STREAM] [--network NETWORK]
44 |
45 | optional arguments:
46 | -h, --help show this help message and exit
  --stream STREAM    Video stream URI, webcam number or path to a video based
                     on which the network is visualized
  --network NETWORK  Network to visualise: one of the built-in Keras applications
                     (VGG16, ResNet50, ...) or path to a .h5 file
51 | ```
52 |
For example, one could visualize YOLO by creating its Keras translation as described in https://github.com/qqwweee/keras-yolo3 and then passing the path to the resulting .h5 file via the `--network` parameter.
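For instance, assuming the converted weights were saved as `yolo.h5` in the working directory (the file name here is hypothetical):

```
docker run --runtime nvidia --env DISPLAY=$DISPLAY -v="/tmp/.X11-unix:/tmp/.X11-unix:rw" -v=$(pwd)/.keras:/root/.keras -v="$(pwd)/..:$(pwd)/.." -w=$(pwd) -it basecv python3 main.py --network yolo.h5 --stream "your/stream/uri"
```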
54 |
55 |
61 |
62 | ### Troubleshooting
63 |
64 | #### Could not connect to any X display.
65 |
The X server must allow connections from the Docker container.
67 |
Run `xhost +local:docker`; see also [this thread](https://forums.docker.com/t/start-a-gui-application-as-root-in-a-ubuntu-container/17069).
69 |
70 | ## Visualization Algorithms
71 | Currently available:
72 | * [Grad-CAM](https://arxiv.org/abs/1610.02391 "Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization")
73 | * [Guided Backprop](https://arxiv.org/abs/1412.6806 "Striving for Simplicity: The All Convolutional Net")
74 |
The platform is extendable with other algorithms whose required computation is on the order of a forward/backward pass through the network, as sketched below.
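A minimal sketch of such an extension, following the same contract as `gradCam` in `gradcam.py` (a function from graph tensors to a normalized visualization tensor); the gradient-saliency example is illustrative, not part of the platform:

```
import tensorflow as tf

def saliency(y, x):
    # Plain gradient saliency: a single backward pass, i.e. the same cost
    # class as Grad-CAM or guided backprop.
    # y: output tensor of interest, x: the network input placeholder
    grad = tf.gradients(tf.reduce_max(y, axis=-1), x)[0]
    sal = tf.reduce_max(tf.abs(grad), axis=-1)   # collapse the channel axis
    return sal / (tf.reduce_max(sal) + 1e-12)    # normalize to [0, 1]
```

The resulting tensor can then simply be added to the `fetches` dictionary in `main.py`.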
76 |
77 |
78 | ## Apply Algorithms to Still Images
79 |
Each visualization algorithm resides in a single file and can be applied to still images:
81 |
82 | ```
83 | $ python3 gradcam.py -h
84 |
85 | usage: gradcam.py [-h] [-i INPUT] [-o OUTPUT] [-n NETWORK]
86 | [--convindex CONVINDEX]
87 |
88 | optional arguments:
89 | -h, --help show this help message and exit
90 | -i INPUT, --input INPUT
91 | Input image
92 | -o OUTPUT, --output OUTPUT
93 | Output Image
94 | -n NETWORK, --network NETWORK
95 | Network (VGG16,ResNet50 ...)
96 | --convindex CONVINDEX
97 | Index of convolutional layer to use in the algorithm
98 | (-1 for last layer)
99 |
100 | ```
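For example, using the defaults shipped with the repository (one output image is written per top-k class, suffixed `_0`, `_1`, ...):

```
python3 gradcam.py -i sample_images/cat_dog.png -o sample_images/cat_dog_cam.png -n ResNet50 --convindex -1
```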
101 |
102 |
103 |
104 |
|       | Last Convolutional Layer, ResNet50 | Last Convolutional Layer, VGG16 | Last-1 Convolutional Layer, ResNet50 | Last-1 Convolutional Layer, VGG16 |
| ----- | --- | --- | --- | --- |
| Top-1 | ![](sample_images/cat_dog_cam_resnet_conv1_0.png) | ![](sample_images/cat_dog_cam_vgg_conv1_0.png) | ![](sample_images/cat_dog_cam_resnet_conv2_0.png) | ![](sample_images/cat_dog_cam_vgg_conv2_0.png) |
| Top-2 | ![](sample_images/cat_dog_cam_resnet_conv1_1.png) | ![](sample_images/cat_dog_cam_vgg_conv1_1.png) | ![](sample_images/cat_dog_cam_resnet_conv2_1.png) | ![](sample_images/cat_dog_cam_vgg_conv2_1.png) |
129 |
130 |
131 |
132 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.4'
2 | services:
3 | vis:
4 | build: .
5 | runtime: nvidia
6 | environment:
7 | - DISPLAY=$DISPLAY
8 | volumes:
9 | - /tmp/.X11-unix:/tmp/.X11-unix:rw
10 | - ./:/src:rw
11 | - ~/.keras:/root/.keras:rw
12 |
13 | #
14 | # docker run --runtime nvidia --env DISPLAY=$DISPLAY --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" -it -v=$(pwd)/..:$(pwd)/.. -w=$(pwd) basecv bash
15 |
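# Typical usage (assumes the X server allows Docker connections, see README):
#   xhost +local:docker
#   docker-compose up --build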
--------------------------------------------------------------------------------
/gradcam.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import itertools
3 | import os
4 |
5 | import tensorflow as tf
6 | import cv2
7 | import numpy as np
8 |
9 | from networks import get_network
10 | from utils import getConvOutput
11 |
def gradCam(y, A, reduce_max=True):
    """
    y, A as in the Grad-CAM paper: y is the class score (pre-softmax),
    A is the feature-map tensor of the chosen convolutional layer.
    """
    if reduce_max:
        y = tf.reduce_max(y, axis=-1, keepdims=True)
    # tf.gradients returns a list, which adds a leading dimension;
    # axes [2, 3] are therefore the spatial dimensions of A's gradient
    mapImportanceGradients = tf.gradients(y, A)
    importanceWeights = tf.reduce_mean(mapImportanceGradients, axis=[2, 3], keepdims=True)
    weightsReshaped = importanceWeights[0]
    weightedFeatureMap = weightsReshaped * A
    reduced = tf.reduce_mean(weightedFeatureMap, axis=[-1])
    relu = tf.nn.relu(reduced)
    # Normalize to [0, 1] for display
    normalized = relu / (tf.reduce_max(relu) + 1e-12)
    return normalized
33 |
def gradCamToHeatMap(cam, im):
    # cv2.resize expects (width, height), while im.shape is (height, width, ...)
    heatmap = cv2.resize(cam[0], (im.shape[1], im.shape[0]))
    colored = np.uint8(0.7 * im + 0.3 * cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET))
    return heatmap, colored
39 |
40 | if __name__ == "__main__":
41 | parser = argparse.ArgumentParser()
42 | parser.add_argument('-i','--input', default="sample_images/cat_dog.png",
43 | help="Input image")
44 | parser.add_argument('-o','--output', default="sample_images/cat_dog_cam.png",
45 | help="Output Image")
46 | parser.add_argument('-n','--network', default="ResNet50",
47 | help="Network (VGG16,ResNet50 ...)")
48 | parser.add_argument('--convindex', default=-1,type=int,
49 | help="Index of convolutional layer to use in the algorithm (-1 for last layer)")
50 |
    args = parser.parse_args()

    sess = tf.Session()
    tf.keras.backend.set_session(sess)
    # "Standard" keeps the plain ReLU gradient, as Grad-CAM requires
    nn, ph = get_network(name=args.network, gradients="Standard")
    nn.summary()
    preSoftMax = nn.output.op.inputs[0]
    A = getConvOutput(nn, args.convindex)
    top_k = 2
    values, indices = tf.math.top_k(preSoftMax, k=top_k)
    gradCamT_top_list = [gradCam(values[..., k], A, reduce_max=False) for k in range(top_k)]
66 |
67 | im = cv2.imread(args.input)
68 | im = cv2.resize(im,(224,224))
69 | # res = sess.run(gradCamT, {ph: [im]})
70 | res = sess.run(gradCamT_top_list, {ph: [im]})
71 | filename, file_extension = os.path.splitext(args.output)
72 | for k in range(top_k):
73 | heatmap,colored = gradCamToHeatMap(res[k], im)
74 | cv2.imwrite(f"{filename}_{k}{file_extension}", colored)
75 | # with StreamReader("http://192.168.16.101:8081/video") as cap:
76 | #
77 | # for frame,num in zip(cap.read(),itertools.count()):
78 | # im = cv2.resize(frame,(224,224))
79 | # res = sess.run(gradCamT,{ph:[im]})
80 | # heatmap,colored = gradCamToHeatMap(res,im)
81 | # cv2.imshow("cam",colored)
82 | # cv2.imwrite(f"cams/{num}.jpeg",colored)
83 | # if cv2.waitKey(1) & 0xFF == ord('q'):
84 | # break
85 |
--------------------------------------------------------------------------------
/guidedBackprop.py:
--------------------------------------------------------------------------------
1 | # deconv gradients
2 | from tensorflow.python.ops import gen_nn_ops
3 | import tensorflow as tf
4 | from collections import namedtuple, OrderedDict
5 |
6 |
BackPropTensors = namedtuple("BackPropTensors", ["output", "selection"])
8 |
9 | def guidedBackprop(neuronOfInterest,nnInput):
10 | vis = tf.gradients(neuronOfInterest,nnInput)
11 | return vis
12 |
def registerConvBackprops(convOuts, nnInput, normalize=True, reduceMax=True):
    backprops = OrderedDict()
    for name, T in convOuts.items():
        # Variable selecting which feature map of the layer is visualized
        x = tf.Variable(0)
        mapOfInterest = T[..., x]
        if reduceMax:
            mapOfInterest = tf.reduce_max(mapOfInterest)
        # construct the guided-backprop gradient for the selected map
        print(f"Registering convolution layer backprop vis for {name}:{T}")
        gradT = guidedBackprop([mapOfInterest], nnInput)[0]
        if normalize:
            gradT = tf.nn.relu(gradT)
            gradT = gradT / (tf.reduce_max(gradT) + 1e-10)
        backprops[name] = gradT, x
    return backprops
29 |
30 | def register_fc_backprops(fc_outs,nn_input,normalize=True):
31 | backprops = OrderedDict()
32 | for name,T in fc_outs.items():
33 | x = tf.Variable(0)
34 | neuron_of_interest = T[...,x]
35 | print(f"Registering fully connected layer backprop vis for {name}:{T}")
36 | gradT = guidedBackprop([neuron_of_interest],nn_input)[0]
37 | if normalize:
38 | gradT = tf.nn.relu(gradT)
39 | gradT = gradT/(tf.reduce_max(gradT)+1e-10)
40 | backprops[name] = BackPropTensors(gradT,x)
41 | return backprops
42 |
if __name__ == "__main__":
    from networks import get_network
    from streamReader import StreamReader
    import itertools
    import cv2
    sess = tf.Session()
    tf.keras.backend.set_session(sess)
    nn, ph = get_network(name="VGG16")
    nn.summary()
    preSoftMax = nn.output.op.inputs[0]
    neuronOfInterest = tf.reduce_max(preSoftMax, axis=-1, keepdims=True)
    guidedT = guidedBackprop(neuronOfInterest, ph)
    im = cv2.imread('sample_images/cat_dog.png')
    im = cv2.resize(im, (224, 224))
    res = sess.run(guidedT, {ph: [im]})
    cv2.imwrite("guided.jpg", res[0][0] * 60000)
    # Visualize the guided-backprop map on a live stream
    with StreamReader("http://192.168.16.101:8081/video") as cap:
        for frame, num in zip(cap.read(), itertools.count()):
            im = cv2.resize(frame, (224, 224))
            res = sess.run(guidedT, {ph: [im]})
            cv2.imshow("guided", res[0][0] * 60000)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
73 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import asyncio
4 | import time
5 | import math
6 | from collections import namedtuple, OrderedDict
7 | import itertools
8 |
9 | import tensorflow as tf
10 | import cv2
11 | import numpy as np
12 |
13 | from streamReader import StreamReader
14 | from gradcam import gradCam, gradCamToHeatMap
15 | from guidedBackprop import registerConvBackprops, register_fc_backprops
16 | from networks import get_network
17 | from maps import mapsToGrid
18 | from utils import get_outputs_from_graph, get_outputs_from_model, getConvOutput
19 | from timed import timeit,Timer,FPS
20 |
parser = argparse.ArgumentParser()
parser.add_argument('--stream', default="http://192.168.16.101:8081/video",
                    help="Video stream URI, webcam number or path to a video based on which the network is visualized")
parser.add_argument('--network', default="VGG16",
                    help="Network to visualise: one of the built-in Keras applications (VGG16, ResNet50, ...) or path to a .h5 file")
args = parser.parse_args()
30 |
31 |
32 |
graph = tf.get_default_graph()
sess = tf.Session()
tf.keras.backend.set_session(sess)
37 |
38 | nn, ph = get_network(args.network)
39 |
40 | print(nn.summary())
41 |
42 | # conv_outputs = get_outputs_from_graph(type='Conv2D')
43 | conv_outputs = get_outputs_from_model(nn,layer_type='Conv2D')
44 | assert conv_outputs, "Provided network has no Convolutional layers, hence I have no idea what to visualize"
45 |
46 |
47 |
48 | conv_grids = OrderedDict( (name, mapsToGrid(output[0])) for name, output in conv_outputs.items())
49 |
50 |
51 |
52 | convBackprops = registerConvBackprops(conv_outputs,nn.input)
53 |
54 | fc_outputs = get_outputs_from_model(nn,layer_type="Dense")
55 | fc_backprops = register_fc_backprops(fc_outputs,nn.input)
56 |
57 |
58 |
59 | sess.run(tf.variables_initializer([convBackprops[name][1] for name in convBackprops ]))
60 | sess.run(tf.variables_initializer([fc_backprops[name].selection for name in fc_backprops ]))
61 |
62 | if fc_outputs:
63 | # GradCam is possible if there are fully connected layers
64 | gradCamA = getConvOutput(nn,-1)
65 | softmaxin = nn.output.op.inputs[0]
66 | camT = gradCam(softmaxin,gradCamA)
67 |
68 | # TODO: make qt part work in thread
69 | # TODO: fix this, (bad fast way to exit from programm)
70 | close_main_loop = [False]
71 |
def rescale_img(image):
    # Min-max normalize, invert (high activations map to dark values) and scale to [0, 255]
    img = np.uint8((1. - (image - np.min(image)) * 1. / (np.max(image) - np.min(image))) * 255)
    return img
75 |
76 | def values2Map(values, num_cols=20):
77 | size = len(values)
78 | vals_filled = np.append(values, [0] * ((num_cols - len(values) % num_cols) % num_cols))
79 | value_map = vals_filled.reshape(-1, num_cols)
80 | scaled_map = (value_map-value_map.min()) / (value_map.max()-value_map.min())
81 | img = cv2.applyColorMap(np.uint8(scaled_map*255), cv2.COLORMAP_JET)
82 | return img, size
83 |
def assignWhenChanged(var, value):
    # Assigning a variable is expensive, so only assign when the value actually changed
    var_value = sess.run(var)
    if var_value != value:
        print(f" Variable value changed {value} != {var_value}")
        sess.run(var.assign(value))
90 |
async def main(ui=None):
92 | assert ui
93 | ui.fillLayers(conv_grids.keys(), fc_outputs.keys())
94 |
95 | with StreamReader(args.stream) as cap:
96 | fps = FPS()
        old_frame = None
        for frame, framenum in zip(cap.read(), itertools.count()):

            # While paused, keep re-processing the last seen frame
            if ui.paused and old_frame is not None:
                frame = old_frame
            else:
                old_frame = frame
103 | currentGridName = ui.currentConv
104 |
105 | timer = Timer("processing",silent=True)
106 | ui.loadRealImage(frame)
107 | timer.tick("image loaded")
108 | map_raw_idx = ui.convMap.raw_idx
109 | dense_raw_idx = ui.denseMap.raw_idx
110 |
111 | frame = cv2.resize(frame,(224,224))
112 | frameToShow = frame.copy()
113 | frame = np.array([frame])
114 | timer.tick("frame prepared")
115 | gridTensor,(columns,rows), mapStack= conv_grids[currentGridName]
116 | neuronBackpropT,map_neuron_selection_T = convBackprops[currentGridName]
117 | timer.tick("setting graph vars")
118 | if map_raw_idx < len(mapStack):
119 | assignWhenChanged(map_neuron_selection_T, map_raw_idx)
120 |
121 | timer.tick("running main session")
122 | fetches = {
123 | "grid": gridTensor,
124 | "map": mapStack[map_raw_idx],
125 | # "cam": camT,
126 | "neuronBackprop": neuronBackpropT,
127 | # "dense": fc_outputs[currentDense]
128 | }
            # If the network has fully connected layers, their inspection
            # as well as the GradCam algorithm is possible
131 | if fc_outputs:
132 | currentDense = ui.currentDense
133 | fetches.update({
134 | "dense": fc_outputs[currentDense],
135 | "cam": camT
136 | })
137 |
138 | if "dense" in fetches:
139 | assignWhenChanged(fc_backprops[currentDense].selection, dense_raw_idx)
140 |
141 | fetched = sess.run(fetches,
142 | feed_dict={ph:frame})
143 | timer.tick("Session passed")
144 | if "cam" in fetched:
145 | heatmap, coloredMap = gradCamToHeatMap(fetched["cam"],frameToShow)
146 | cv2.imshow("gradCam",coloredMap)
147 | if "dense" in fetched:
148 | activationMap, cell_numbers = values2Map(fetched["dense"][0])
149 | ui.loadActivationMap(activationMap)
150 | ui.loadActivationScrollMap(activationMap, cell_numbers)
151 | if dense_raw_idx < cell_numbers:
152 | ui.setDenseValue(fetched["dense"][0][dense_raw_idx])
153 | if "grid" in fetched:
154 | ui.loadMap(rescale_img(fetched["grid"]), (rows,columns))
155 | if "map" in fetched:
156 | ui.loadCell(rescale_img(fetched["map"]))
157 | cv2.imshow("neuron-backprop",fetched["neuronBackprop"][0])
158 |
159 | print(f"Frame Number:{framenum:5d}, FPS:{fps():2.1f}", end="\r")
160 | # cv2.imshow("neuron-backprop-fc",fc_backprop[0])
161 | if cv2.waitKey(1) & 0xFF == ord('q'):
162 | break
163 |
164 | # TODO: add check for number of cells here
165 |
166 | QApplication.processEvents()
167 |
168 | if close_main_loop[0]:
169 | break
170 |
171 |
172 |
173 | sys.exit(0)
174 |
175 | import sys
176 | import signal
177 | from ui import Ui
178 | from PyQt5.QtWidgets import QApplication
179 | # from PyQt5.QtCore import QThread
180 |
181 | def sigint_handler(*args):
182 | """Handler for the SIGINT signal."""
183 | # sys.stderr.write('\r')
184 | # if QMessageBox.question(None, '', "Are you sure you want to quit?",
185 | # QMessageBox.Yes | QMessageBox.No,
186 | # QMessageBox.No) == QMessageBox.Yes:
187 | close_main_loop[0] = True
188 |
189 |
190 | if __name__ == '__main__':
191 | loop = asyncio.get_event_loop()
192 | signal.signal(signal.SIGINT, sigint_handler)
193 |
194 | app = QApplication(sys.argv)
195 | ui = Ui()
196 | ui.show()
197 |
198 | loop.run_until_complete(main(ui=ui))
199 |
200 | # writer = tf.summary.FileWriter("outputgraph", sess.graph)
201 | # writer.close()
202 |
--------------------------------------------------------------------------------
/main.ui:
--------------------------------------------------------------------------------
[main.ui is a Qt Designer XML file; its markup was stripped during extraction and only widget text and geometry values survived. The file defines MainWindow (1279x821) with the input preview label ("realtime"), zoomed-cell and "filtered" labels, the conv/dense layer combo boxes, a "Colored / GreyScale" checkbox, the feature-map and dense scroll areas, value labels, a "Pause" button, and two signal connections: checkBoxColoredGreyScale.stateChanged(int) -> changeColorMode(int) and pushButtonPlay.released() -> PausePlay(). See the generated ui_main.py below for the equivalent widget definitions.]
--------------------------------------------------------------------------------
/maps.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import math
3 |
def mapsToGrid(output):
    layer = output
    numMaps = int(layer.shape[-1])
    numColumns = math.ceil(numMaps ** 0.5)
    numRows = math.ceil(numMaps / numColumns)
    # Pad the grid to numRows*numColumns tiles by repeating the last map
    padNum = numRows * numColumns - numMaps
    mapStack = tf.unstack(layer, axis=2)
    map_on_tail = mapStack[-1]
    for i in range(padNum):
        mapStack.append(map_on_tail)
    rowStacks = [tf.concat(mapStack[i:i + numColumns], axis=1)
                 for i in range(0, numColumns * numRows, numColumns)]
    result = tf.concat(rowStacks, axis=0)
    return result, (numColumns, numRows), mapStack
17 |
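# Quick self-check sketch (added for illustration; not part of the original module):
# tile 6 fake 4x4 maps into a grid and inspect the resulting static shape.
if __name__ == "__main__":
    maps = tf.placeholder(tf.float32, shape=(4, 4, 6))
    grid, (cols, rows), stack = mapsToGrid(maps)
    # 6 maps -> 3 columns x 2 rows of 4x4 tiles => an 8x12 grid
    print(grid.shape, (cols, rows), len(stack))  # (8, 12) (3, 2) 6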
--------------------------------------------------------------------------------
/networks.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from tensorflow.python.ops import gen_nn_ops
3 |
# Route Keras ReLUs through tf.nn.relu so the "Relu" gradient override below applies
tf.keras.activations.relu = tf.nn.relu
5 |
6 | @tf.RegisterGradient("Customlrn")
7 | def _CustomlrnGrad(op, grad):
8 | return grad
9 |
# Guided-backprop gradient for ReLU: propagate only where both the incoming
# gradient and the forward activation are positive
@tf.RegisterGradient("GuidedRelu")
def _GuidedReluGrad(op, grad):
    return tf.where(0. < grad, gen_nn_ops.relu_grad(grad, op.outputs[0]), tf.zeros_like(grad))
14 |
def get_network(name="ResNet50", gradients="GuidedRelu"):
    graph = tf.get_default_graph()
    # Override ReLU gradients for guided backprop unless standard gradients are
    # explicitly requested (gradcam.py passes gradients="Standard")
    gradients_overwrite_map = {'Relu': 'GuidedRelu', 'LRN': 'Customlrn'} if gradients and gradients != "Standard" else {}
    with graph.gradient_override_map(gradients_overwrite_map):
        if name.endswith(".h5"):
            print(f"Loading model from file {name}")
            nn = tf.keras.models.load_model(name, compile=False)
            return nn, nn.input
23 | else:
24 | print("Loading from keras applications")
25 | knownNets = ['DenseNet121',
26 | 'DenseNet169',
27 | 'DenseNet201',
28 | 'InceptionResNetV2',
29 | 'InceptionV3',
30 | 'MobileNet',
31 | 'MobileNetV2',
32 | 'NASNetLarge',
33 | 'NASNetMobile',
34 | 'ResNet50',
35 | 'VGG16',
36 | 'VGG19',
37 | 'Xception']
            assert name in knownNets, "Network should be a path to a .h5 file or one of {}".format(knownNets)
41 | ph = tf.placeholder(tf.float32, shape=(None,224, 224,3),name="cnnInput")
42 | nn = getattr(tf.keras.applications,name)
43 | nn = nn(
44 | include_top=True,
45 | weights='imagenet',
46 | input_tensor=ph,
47 | input_shape=None,
48 | pooling=None,
49 | classes=1000
50 | )
51 | return nn,ph
52 |
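# Usage sketch (illustrative; mirrors how main.py and gradcam.py load networks):
#   nn, ph = get_network("VGG16")                      # built-in Keras application
#   nn, ph = get_network("model.h5", gradients=None)   # saved Keras model, standard gradients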
--------------------------------------------------------------------------------
/sample_images/cat_dog.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberneuron/RT-CNN-Vis/5001f6c6d429bf94adb8d2655ac354ea319969a9/sample_images/cat_dog.png
--------------------------------------------------------------------------------
/sample_images/cat_dog_cam_resnet_conv1_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberneuron/RT-CNN-Vis/5001f6c6d429bf94adb8d2655ac354ea319969a9/sample_images/cat_dog_cam_resnet_conv1_0.png
--------------------------------------------------------------------------------
/sample_images/cat_dog_cam_resnet_conv1_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberneuron/RT-CNN-Vis/5001f6c6d429bf94adb8d2655ac354ea319969a9/sample_images/cat_dog_cam_resnet_conv1_1.png
--------------------------------------------------------------------------------
/sample_images/cat_dog_cam_resnet_conv2_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberneuron/RT-CNN-Vis/5001f6c6d429bf94adb8d2655ac354ea319969a9/sample_images/cat_dog_cam_resnet_conv2_0.png
--------------------------------------------------------------------------------
/sample_images/cat_dog_cam_resnet_conv2_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberneuron/RT-CNN-Vis/5001f6c6d429bf94adb8d2655ac354ea319969a9/sample_images/cat_dog_cam_resnet_conv2_1.png
--------------------------------------------------------------------------------
/sample_images/cat_dog_cam_vgg_conv1_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberneuron/RT-CNN-Vis/5001f6c6d429bf94adb8d2655ac354ea319969a9/sample_images/cat_dog_cam_vgg_conv1_0.png
--------------------------------------------------------------------------------
/sample_images/cat_dog_cam_vgg_conv1_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberneuron/RT-CNN-Vis/5001f6c6d429bf94adb8d2655ac354ea319969a9/sample_images/cat_dog_cam_vgg_conv1_1.png
--------------------------------------------------------------------------------
/sample_images/cat_dog_cam_vgg_conv2_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberneuron/RT-CNN-Vis/5001f6c6d429bf94adb8d2655ac354ea319969a9/sample_images/cat_dog_cam_vgg_conv2_0.png
--------------------------------------------------------------------------------
/sample_images/cat_dog_cam_vgg_conv2_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberneuron/RT-CNN-Vis/5001f6c6d429bf94adb8d2655ac354ea319969a9/sample_images/cat_dog_cam_vgg_conv2_1.png
--------------------------------------------------------------------------------
/sample_images/out_optimized.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberneuron/RT-CNN-Vis/5001f6c6d429bf94adb8d2655ac354ea319969a9/sample_images/out_optimized.gif
--------------------------------------------------------------------------------
/streamReader.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import cv2
3 | import itertools
4 | import argparse
5 | import time
6 |
7 | class StreamReader:
8 | def __init__(self, src=0,framesleep=None):
9 | self.framesleep = framesleep
10 | self.src = src
11 | self.cap = cv2.VideoCapture(self.src)
12 | self.grabbed, self.frame = self.cap.read()
13 | self.started = False
14 | self.read_lock = threading.Lock()
15 | def __enter__(self):
16 | self.start()
17 | # print(f"FPS is: {self.cap.get(cv2.CAP_PROP_FPS)}")
18 | # self.cap.set(cv2.CAP_PROP_FPS, 2)
19 | return self
20 | def set(self, var1, var2):
21 | self.cap.set(var1, var2)
22 |
23 | def start(self):
24 | if self.started:
            print('[!] Asynchronous video capturing has already been started.')
26 | return None
27 | self.started = True
28 | self.thread = threading.Thread(target=self.update, args=())
29 | self.thread.daemon = True
30 | self.thread.start()
31 | return self
32 |
33 | def update(self):
34 | while self.started:
35 | if self.framesleep:
36 | time.sleep(self.framesleep)
37 | grabbed, frame = self.cap.read()
38 | with self.read_lock:
39 | self.grabbed = grabbed
40 | self.frame = frame
41 |
    def read(self):
        while True:
            # Snapshot the latest frame under the lock to avoid racing update()
            with self.read_lock:
                grabbed = self.grabbed
                frame = self.frame.copy() if grabbed else None
            if grabbed:
                yield frame
            else:
                break
53 | def stop(self):
54 | self.started = False
55 | self.thread.join()
56 |
    def __exit__(self, exec_type, exc_value, traceback):
        self.stop()
        self.cap.release()
        print("Exiting capture")
        print(exec_type, exc_value, traceback)
        # Returning True suppresses exceptions raised inside the with-block
        return True
63 |
64 | if __name__ == '__main__':
65 | parser = argparse.ArgumentParser()
    parser.add_argument('stream', nargs='?', default='http://192.168.16.101:8080/video',
                        help="Video stream URI, webcam number or path to a video")
68 | args = parser.parse_args()
69 |
70 | with StreamReader(args.stream) as wc:
71 | # for i in itertools.count():
72 | for frame in wc.read():
73 | if cv2.waitKey(1) ==27:
74 | exit(0)
75 | cv2.imshow("frame",frame)
76 |
--------------------------------------------------------------------------------
/timed.py:
--------------------------------------------------------------------------------
1 | import time
2 | import collections
3 |
4 | def timeit(f):
5 |
6 | def timed(*args, **kw):
7 |
8 | ts = time.time()
9 | result = f(*args, **kw)
10 | te = time.time()
11 |
12 | # print('func:%r args:[%r, %r] took: %2.4f sec' % (f.__name__, args, kw, te-ts))
13 | print('func:%r took: %2.4f sec' % (f.__name__, te-ts))
14 | return result
15 |
16 | return timed
17 |
18 | class Timer:
19 | def __init__(self,name="DefaultTimer",silent=False):
20 | self.starttime = time.time()
21 | self.name = name
22 | self.silent = silent
23 | def tick(self,message=""):
24 | if not self.silent:
25 | now = time.time()
26 | print('%r: %r:%2.4f s' % (self.name, message, now-self.starttime))
    def reset(self):
        self.starttime = time.time()
    def pause(self):
        pass
31 |
class FPS:
    def __init__(self, average_of=50):
        self.frametimestamps = collections.deque(maxlen=average_of)
    def __call__(self):
        self.frametimestamps.append(time.time())
        if len(self.frametimestamps) > 1:
            # N timestamps span N-1 frame intervals
            return (len(self.frametimestamps) - 1) / (self.frametimestamps[-1] - self.frametimestamps[0])
        else:
            return 0.0
41 |
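# Usage sketch (illustrative; not part of the original module):
if __name__ == "__main__":
    fps = FPS(average_of=10)
    timer = Timer("demo")
    for _ in range(5):
        time.sleep(0.01)
        timer.tick(f"fps so far: {fps():.1f}")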
--------------------------------------------------------------------------------
/ui.py:
--------------------------------------------------------------------------------
1 | from PyQt5.QtWidgets import QMainWindow, QFileDialog, QDialog, QWidget, \
2 | QApplication, QTabWidget, QLabel, QHBoxLayout, \
3 | QPushButton
4 | from PyQt5.QtCore import pyqtSignal, pyqtSlot, QDir, QObject, Qt, QThread, \
5 | QTimer, QEvent
6 | from PyQt5.QtGui import QPixmap, QPainter, QPen, QImage, QGuiApplication, \
7 | QCursor, QClipboard
8 |
9 | import numpy as np
10 | import os
11 | import sys
12 | import datetime
13 | import cv2
14 |
15 | from ui_main import Ui_MainWindow
16 |
17 |
18 | class FeaturesMap(QLabel):
19 | cell_changed = pyqtSignal(int)
20 |
21 | def __init__(self):
22 | super().__init__()
23 | self._rows = 1
24 | self._cols = 1
25 | self.raw_idx = 0
26 |
27 | def mousePressEvent(self, event):
28 | self.raw_idx = self.getRawNumber(event.pos())
29 | self.cell_changed.emit(self.raw_idx)
30 |
31 | def setGridSize(self, size):
32 | self._rows, self._cols = size
33 |
    def getRawNumber(self, pos):
        # x indexes columns, y indexes rows; the grid holds _cols tiles per row
        cubeWidth = self.width() // self._cols
        cubeHeight = self.height() // self._rows
        cur_col = pos.x() // cubeWidth
        cur_row = pos.y() // cubeHeight
        return self._cols * cur_row + cur_col
40 |
41 | def resetIdx(self):
42 | self.raw_idx = 0
43 |
44 | class DenseMap(QLabel):
45 | cell_changed = pyqtSignal(int)
46 |
47 | def __init__(self, parent):
48 | super().__init__()
49 | self._rows = 1
50 | self._cols = 1
51 | self.raw_idx = 0
52 | self.cell_limit = 1
53 |
54 | def setGridSize(self, shape):
55 | self._cols, self._rows, _ = shape
56 |
57 | def setCellNumbers(self, number):
58 | self.cell_limit = number
59 |
60 | def mousePressEvent(self, event):
61 | self.raw_idx = self.getRawNumber(event.pos())
62 | self.cell_changed.emit(self.raw_idx)
63 |
64 | def getRawNumber(self, pos):
65 | p_height = self.pixmap().height()
66 | s_height = self.height()
67 | if p_height < s_height:
68 | cubeWidth = self.width() / self._rows
69 | dif = (s_height - p_height) // 2
70 | cubeHeight = p_height / self._cols
71 | cur_row = pos.x() // cubeWidth
72 | cur_col = (pos.y() - dif ) // cubeHeight
73 | else:
74 | cubeWidth = self.width() / self._rows
75 | cubeHeight = self.height() / self._cols
76 | cur_row = pos.x() // cubeWidth
77 | cur_col = pos.y() // cubeHeight
78 | return int(self._rows * cur_col + cur_row)
79 |
80 | def resetIdx(self):
81 | self.raw_idx = 0
82 |
83 | class Ui(QMainWindow):
84 | def __init__(self):
85 | super().__init__()
86 | self.ui = Ui_MainWindow()
87 | self.ui.setupUi(self)
88 | self._show_colored = True
89 |
        # Placeholder entries, replaced with the real layer names via fillLayers()
        self.conv_layers = ['Conv1', 'Layer2', 'Layer3'] * 8
        self.dense_layers = ['FC1', 'Layer2', 'Layer3'] * 8
        self.fillLayers(self.conv_layers, self.dense_layers)
93 |
94 | self.convMap = FeaturesMap()
95 | self.ui.scrollAreaMap.setWidget(self.convMap)
96 |
97 | self.currentConv = self.conv_layers[0]
98 | self.currentDense = self.dense_layers[0]
99 |
100 | self.convMap.cell_changed.connect(self.changeMapNum)
101 | self.ui.comboBoxConv.currentTextChanged.connect(self.ConvLayerChanged)
102 | self.ui.comboBoxFC.currentTextChanged.connect(self.DenseLayerChanged)
103 |
104 | self.paused = False
105 | self.denseMap = DenseMap(self.ui.scrollAreaDense)
106 | self.ui.scrollAreaDense.setWidget(self.denseMap)
107 | self.denseMap.cell_changed.connect(self.changeDenseNum)
108 |
109 | @pyqtSlot()
110 | def PausePlay(self):
111 | if self.paused:
112 | self.ui.pushButtonPlay.setText('Pause')
113 | else:
114 | self.ui.pushButtonPlay.setText('Play')
115 | self.paused = not self.paused
116 |
117 | @pyqtSlot(int)
118 | def changeColorMode(self, state):
119 | self._show_colored = bool(state)
120 |
121 | @pyqtSlot(int)
122 | def changeMapNum(self, num):
123 | self.ui.labelMapNum.setText(str(num))
124 |
125 | @pyqtSlot(int)
126 | def changeDenseNum(self, num):
127 | if num < self.denseMap.cell_limit:
128 | self.ui.labelDenseNum.setText(str(num))
129 | else:
130 | self.ui.labelDenseNum.setText('Out')
131 |
132 | @pyqtSlot(str)
133 | def ConvLayerChanged(self, layer_name):
134 | self.currentConv = layer_name
135 | self.ui.labelMapName.setText(self.currentConv)
136 | self.ui.labelMapNum.setText('0')
137 | self.convMap.resetIdx()
138 |
139 | @pyqtSlot(str)
140 | def DenseLayerChanged(self, layer_name):
141 | self.currentDense = layer_name
142 | self.ui.labelDenseName.setText(layer_name)
143 | self.ui.labelDenseNum.setText('0')
144 | # self.denseMap.resetIdx()
145 |
146 | def fillLayers(self, conv_layers, dense_layers):
147 | self.ui.comboBoxConv.clear()
148 | self.ui.comboBoxFC.clear()
149 | if conv_layers:
150 | self.ui.comboBoxConv.addItems(conv_layers)
151 | self.conv_layers = conv_layers
152 | if dense_layers:
153 | self.ui.comboBoxFC.addItems(dense_layers)
154 | self.dense_layers = dense_layers
155 |
156 | # def setButtons(self, buttons):
157 | # widget = QWidget()
158 | # layout = QHBoxLayout()
159 | # for button in buttons:
160 | # btn = QPushButton(button)
161 | # btn.setFlat(True)
162 | # btn.clicked.connect(self.btnClicked)
163 | # layout.addWidget(btn)
164 | # widget.setLayout(layout)
165 | # self.ui.scrollArea.setWidget(widget)
166 | #
167 | # self.buttons = list(sorted(buttons))
168 | # self.currentConv = self.buttons[0]
169 |
170 | def loadActivationScrollMap(self, map, cell_numbers):
171 | label = self.denseMap
172 | label.setCellNumbers(cell_numbers)
173 | label.setGridSize(map.shape)
174 | img = cv2.cvtColor(map, cv2.COLOR_BGR2RGB)
175 | height, width, _ = img.shape
176 | bytesPerLine = 3 * width
177 | qImg = QImage(img, width, height, bytesPerLine, QImage.Format_RGB888)
178 | scroll_area = self.ui.scrollAreaDense
179 | width = scroll_area.width() - scroll_area.verticalScrollBar().width() - 2
180 | pixmap = QPixmap(qImg).scaledToWidth(width)
181 | label.setGeometry(0, 0, pixmap.width(), pixmap.height())
182 | label.setPixmap(pixmap)
183 |
184 | def loadActivationMap(self, map):
185 | img = cv2.cvtColor(map, cv2.COLOR_BGR2RGB)
186 | height, width, _ = img.shape
187 | bytesPerLine = 3 * width
188 | qImg = QImage(img, width, height, bytesPerLine, QImage.Format_RGB888)
189 | label = self.ui.labelMapDense
190 | width, height = label.width(), label.height()
191 | pixmap = QPixmap(qImg).scaled(width, height, Qt.KeepAspectRatio)
192 | label.setPixmap(pixmap)
193 |
194 | def loadMap(self, image, size):
195 | img = cv2.resize(image, (self.convMap.width(), self.convMap.height()),
196 | interpolation = cv2.INTER_NEAREST)
197 | if self._show_colored:
198 | img = cv2.applyColorMap(img, cv2.COLORMAP_JET)
199 | # hsvImg = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
200 | # hsvImg[...,2] = hsvImg[...,2]*0.2
201 | # cv2.cvtColor(hsvImg,cv2.COLOR_HSV2RGB)
202 | height, width, _ = img.shape
203 | bytesPerLine = 3 * width
204 | qImg = QImage(img, width, height, bytesPerLine, QImage.Format_RGB888)
205 | else:
206 | height, width = img.shape
207 | qImg = QImage(img, width, height, QImage.Format_Grayscale8)
208 | self.convMap.setPixmap(QPixmap(qImg))
209 | self.convMap.setGridSize(size)
210 |
211 | def setDenseValue(self, val):
212 | self.ui.labelDenseVal.setText(str(val))
213 |
214 | def loadRealImage(self, image):
215 | label = self.ui.labelInput
216 | img = cv2.resize(image, (label.width(), label.height()))
217 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
218 | height, width, _ = img.shape
219 | bytesPerLine = 3 * width
220 | qImg = QImage(img, width, height, bytesPerLine, QImage.Format_RGB888)
221 | label.setPixmap(QPixmap(qImg))
222 |
223 | def loadCell(self, image):
224 | img = cv2.resize(image, (224, 224),interpolation = cv2.INTER_NEAREST)
225 | if self._show_colored:
226 | img = cv2.applyColorMap(img, cv2.COLORMAP_JET)
227 | height, width, _ = img.shape
228 | bytesPerLine = 3 * width
229 | qImg = QImage(img, width, height, bytesPerLine, QImage.Format_RGB888)
230 | else:
231 | height, width = img.shape
232 | qImg = QImage(img, width, height, QImage.Format_Grayscale8)
233 |
234 | self.ui.labelZoomed.setPixmap(QPixmap(qImg))
235 |
236 |
237 | def run_ui():
238 | app = QApplication(sys.argv)
239 | ui = Ui()
240 | # ui.setGeometry(500, 300, 300, 400)
241 | ui.show()
242 | sys.exit(app.exec_())
243 |
244 |
245 | if __name__ == '__main__':
246 | run_ui()
247 |
--------------------------------------------------------------------------------
/ui_main.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'main.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.11.3
6 | #
7 | # WARNING! All changes made in this file will be lost!
8 |
9 | from PyQt5 import QtCore, QtGui, QtWidgets
10 |
11 | class Ui_MainWindow(object):
12 | def setupUi(self, MainWindow):
13 | MainWindow.setObjectName("MainWindow")
14 | MainWindow.resize(1279, 821)
15 | self.centralwidget = QtWidgets.QWidget(MainWindow)
16 | self.centralwidget.setObjectName("centralwidget")
17 | self.scrollAreaMap = QtWidgets.QScrollArea(self.centralwidget)
18 | self.scrollAreaMap.setGeometry(QtCore.QRect(310, 110, 650, 650))
19 | self.scrollAreaMap.setWidgetResizable(True)
20 | self.scrollAreaMap.setObjectName("scrollAreaMap")
21 | self.scrollAreaMapWidgetContents = QtWidgets.QWidget()
22 | self.scrollAreaMapWidgetContents.setGeometry(QtCore.QRect(0, 0, 648, 648))
23 | self.scrollAreaMapWidgetContents.setObjectName("scrollAreaMapWidgetContents")
24 | self.scrollAreaMap.setWidget(self.scrollAreaMapWidgetContents)
25 | self.labelInput = QtWidgets.QLabel(self.centralwidget)
26 | self.labelInput.setGeometry(QtCore.QRect(20, 20, 251, 161))
27 | self.labelInput.setObjectName("labelInput")
28 | self.labelZoomed = QtWidgets.QLabel(self.centralwidget)
29 | self.labelZoomed.setGeometry(QtCore.QRect(20, 240, 251, 251))
30 | self.labelZoomed.setObjectName("labelZoomed")
31 | self.labelFiltered = QtWidgets.QLabel(self.centralwidget)
32 | self.labelFiltered.setGeometry(QtCore.QRect(20, 500, 250, 250))
33 | self.labelFiltered.setObjectName("labelFiltered")
34 | self.labelMapName = QtWidgets.QLabel(self.centralwidget)
35 | self.labelMapName.setGeometry(QtCore.QRect(20, 210, 181, 21))
36 | self.labelMapName.setObjectName("labelMapName")
37 | self.comboBoxConv = QtWidgets.QComboBox(self.centralwidget)
38 | self.comboBoxConv.setGeometry(QtCore.QRect(400, 60, 541, 23))
39 | self.comboBoxConv.setObjectName("comboBoxConv")
40 | self.comboBoxFC = QtWidgets.QComboBox(self.centralwidget)
41 | self.comboBoxFC.setGeometry(QtCore.QRect(980, 60, 251, 23))
42 | self.comboBoxFC.setObjectName("comboBoxFC")
43 | self.labelMapNum = QtWidgets.QLabel(self.centralwidget)
44 | self.labelMapNum.setGeometry(QtCore.QRect(210, 210, 61, 21))
45 | self.labelMapNum.setText("")
46 | self.labelMapNum.setObjectName("labelMapNum")
47 | self.checkBoxColoredGreyScale = QtWidgets.QCheckBox(self.centralwidget)
48 | self.checkBoxColoredGreyScale.setGeometry(QtCore.QRect(1100, 20, 141, 21))
49 | self.checkBoxColoredGreyScale.setChecked(True)
50 | self.checkBoxColoredGreyScale.setObjectName("checkBoxColoredGreyScale")
51 | self.labelMapDense = QtWidgets.QLabel(self.centralwidget)
52 | self.labelMapDense.setGeometry(QtCore.QRect(990, 110, 71, 651))
53 | self.labelMapDense.setText("")
54 | self.labelMapDense.setObjectName("labelMapDense")
55 | self.scrollAreaDense = QtWidgets.QScrollArea(self.centralwidget)
56 | self.scrollAreaDense.setGeometry(QtCore.QRect(1080, 110, 171, 581))
57 | self.scrollAreaDense.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
58 | self.scrollAreaDense.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
59 | self.scrollAreaDense.setWidgetResizable(True)
60 | self.scrollAreaDense.setObjectName("scrollAreaDense")
61 | self.scrollAreaWidgetContents = QtWidgets.QWidget()
62 | self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 156, 579))
63 | self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
64 | self.scrollAreaDense.setWidget(self.scrollAreaWidgetContents)
65 | self.label = QtWidgets.QLabel(self.centralwidget)
66 | self.label.setGeometry(QtCore.QRect(1080, 740, 81, 21))
67 | self.label.setObjectName("label")
68 | self.labelDenseName = QtWidgets.QLabel(self.centralwidget)
69 | self.labelDenseName.setGeometry(QtCore.QRect(1080, 710, 101, 21))
70 | self.labelDenseName.setText("")
71 | self.labelDenseName.setObjectName("labelDenseName")
72 | self.labelDenseNum = QtWidgets.QLabel(self.centralwidget)
73 | self.labelDenseNum.setGeometry(QtCore.QRect(1190, 710, 51, 21))
74 | self.labelDenseNum.setText("")
75 | self.labelDenseNum.setObjectName("labelDenseNum")
76 | self.labelDenseVal = QtWidgets.QLabel(self.centralwidget)
77 | self.labelDenseVal.setGeometry(QtCore.QRect(1170, 740, 71, 21))
78 | self.labelDenseVal.setText("")
79 | self.labelDenseVal.setObjectName("labelDenseVal")
80 | self.pushButtonPlay = QtWidgets.QPushButton(self.centralwidget)
81 | self.pushButtonPlay.setGeometry(QtCore.QRect(320, 60, 51, 31))
82 | self.pushButtonPlay.setObjectName("pushButtonPlay")
83 | MainWindow.setCentralWidget(self.centralwidget)
84 | self.menubar = QtWidgets.QMenuBar(MainWindow)
85 | self.menubar.setGeometry(QtCore.QRect(0, 0, 1279, 20))
86 | self.menubar.setObjectName("menubar")
87 | MainWindow.setMenuBar(self.menubar)
88 | self.statusbar = QtWidgets.QStatusBar(MainWindow)
89 | self.statusbar.setObjectName("statusbar")
90 | MainWindow.setStatusBar(self.statusbar)
91 |
92 | self.retranslateUi(MainWindow)
93 | self.checkBoxColoredGreyScale.stateChanged['int'].connect(MainWindow.changeColorMode)
94 | self.pushButtonPlay.released.connect(MainWindow.PausePlay)
95 | QtCore.QMetaObject.connectSlotsByName(MainWindow)
96 |
97 | def retranslateUi(self, MainWindow):
98 | _translate = QtCore.QCoreApplication.translate
99 | MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
100 | self.labelInput.setText(_translate("MainWindow", "realtime"))
101 | self.labelZoomed.setText(_translate("MainWindow", "TextLabel"))
102 | self.labelFiltered.setText(_translate("MainWindow", "Your advertisement here"))
103 | self.labelMapName.setText(_translate("MainWindow", "Not chosen"))
104 | self.checkBoxColoredGreyScale.setText(_translate("MainWindow", "Colored / GreyScale"))
105 | self.label.setText(_translate("MainWindow", "Value"))
106 | self.pushButtonPlay.setText(_translate("MainWindow", "Pause"))
107 |
108 |
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from collections import OrderedDict
3 |
def get_outputs_from_graph(op_type='Conv2D'):
    assert op_type in ['Conv2D']
    graph = tf.get_default_graph()
    return OrderedDict((i.name, i.outputs[0]) for i in graph.get_operations() if i.type.lower() == op_type.lower())
8 |
def get_outputs_from_model(model, layer_type="Dense", pre_activation=True):
    assert layer_type in ["Dense", "Conv2D"]
    Layer = getattr(tf.keras.layers, layer_type)
    layers = model.layers
    def get_layer_output(layer):
        if pre_activation:
            # The input of the layer's activation op, i.e. the pre-activation tensor
            return layer.output.op.inputs[0]
        else:
            return layer.output
    return OrderedDict((layer.name, get_layer_output(layer)) for layer in layers if type(layer) is Layer)
20 |
21 | def getConvOutput(model,index=-1):
22 | layers = model.layers
23 | return [layer.output for layer in layers if type(layer) is tf.keras.layers.Conv2D][index]
24 |
--------------------------------------------------------------------------------