├── .idea
│   ├── Bishe.iml
│   ├── inspectionProfiles
│   │   └── Project_Default.xml
│   ├── misc.xml
│   ├── modules.xml
│   └── vcs.xml
├── BusterNetCore.py
├── README.md
├── __pycache__
│   ├── BusterNetCore.cpython-37.pyc
│   ├── UIloadingpicture.cpython-37.pyc
│   ├── UIlogin.cpython-37.pyc
│   ├── cropimagecir.cpython-37.pyc
│   ├── main.cpython-37.pyc
│   ├── main_ui.cpython-37.pyc
│   ├── model.cpython-37.pyc
│   └── pasteimage.cpython-37.pyc
├── cropimagecir.py
├── fileaddress.txt
├── main.py
├── main_ui.py
├── main_ui.ui
├── masktest.png
├── model.py
├── modelUI.py
├── morepictures.py
├── pasteimage.py
├── rectangle_result.png
└── test.png
/.idea/Bishe.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/BusterNetCore.py:
--------------------------------------------------------------------------------
1 | """
2 | This file defines all BusterNet related custom layers
3 | """
4 | from __future__ import print_function
5 | from keras.layers import Conv2D, MaxPooling2D
6 | from keras.layers import Layer, Input, Lambda
7 | from keras.layers import BatchNormalization, Activation, Concatenate
8 | from keras.models import Model
9 | from keras.applications.vgg16 import preprocess_input
10 | from keras import backend as K
11 | import tensorflow as tf
12 |
13 | def std_norm_along_chs(x) :
14 | '''Data normalization along the channel axis
15 | Input:
16 | x = tensor4d, (n_samples, n_rows, n_cols, n_feats)
17 | Output:
18 | xn = tensor4d, same shape as x, normalized version of x
19 | '''
20 | avg = K.mean(x, axis=-1, keepdims=True)
21 | std = K.maximum(1e-4, K.std(x, axis=-1, keepdims=True))
22 | return (x - avg) / std
23 |
24 | def BnInception(x, nb_inc=16, inc_filt_list=[(1,1), (3,3), (5,5)], name='uinc') :
25 | '''Basic Google inception module with batch normalization
26 | Input:
27 | x = tensor4d, (n_samples, n_rows, n_cols, n_feats)
28 | nb_inc = int, number of filters in individual Conv2D
29 | inc_filt_list = list of kernel sizes, individual Conv2D kernel size
30 | name = str, name of module
31 | Output:
32 | xn = tensor4d, (n_samples, n_rows, n_cols, n_new_feats)
33 | '''
34 | uc_list = []
35 | for idx, ftuple in enumerate( inc_filt_list ) :
36 | uc = Conv2D( nb_inc, ftuple, activation='linear', padding='same', name=name+'_c%d' % idx)(x)
37 | uc_list.append(uc)
38 | if ( len( uc_list ) > 1 ) :
39 | uc_merge = Concatenate( axis=-1, name=name+'_merge')(uc_list)
40 | else :
41 | uc_merge = uc_list[0]
42 | uc_norm = BatchNormalization(name=name+'_bn')(uc_merge)
43 | xn = Activation('relu', name=name+'_re')(uc_norm)
44 | return xn
45 |
46 | class SelfCorrelationPercPooling( Layer ) :
47 | '''Custom Self-Correlation Percentile Pooling Layer
48 | Argument:
49 | nb_pools = int, number of percentile poolings
50 | Input:
51 | x = tensor4d, (n_samples, n_rows, n_cols, n_feats)
52 | Output:
53 | x_pool = tensor4d, (n_samples, n_rows, n_cols, nb_pools)
54 | '''
55 | def __init__( self, nb_pools=256, **kwargs ) :
56 | self.nb_pools = nb_pools
57 | super( SelfCorrelationPercPooling, self ).__init__( **kwargs )
58 | def build( self, input_shape ) :
59 | self.built = True
60 | def call( self, x, mask=None ) :
61 | # parse input feature shape
62 | bsize, nb_rows, nb_cols, nb_feats = K.int_shape( x )
63 | nb_maps = nb_rows * nb_cols
64 | # self correlation
65 | x_3d = K.reshape( x, tf.stack( [ -1, nb_maps, nb_feats ] ) )
66 | x_corr_3d = tf.matmul( x_3d, x_3d, transpose_a = False, transpose_b = True ) / nb_feats
67 | x_corr = K.reshape( x_corr_3d, tf.stack( [ -1, nb_rows, nb_cols, nb_maps ] ) )
68 | # argsort response maps along the translation dimension
69 | if ( self.nb_pools is not None ) :
70 | ranks = K.cast( K.round( tf.lin_space( 1., nb_maps - 1, self.nb_pools ) ), 'int32' )
71 | else :
72 | ranks = tf.range( 1, nb_maps, dtype = 'int32' )
73 | x_sort, _ = tf.nn.top_k( x_corr, k = nb_maps, sorted = True )
74 | # pool out x features at interested ranks
75 | # NOTE: tf v1.1 only supports indexing at the 1st dimension
76 | x_f1st_sort = K.permute_dimensions( x_sort, ( 3, 0, 1, 2 ) )
77 | x_f1st_pool = tf.gather( x_f1st_sort, ranks )
78 | x_pool = K.permute_dimensions( x_f1st_pool, ( 1, 2, 3, 0 ) )
79 | return x_pool
80 | def compute_output_shape( self, input_shape ) :
81 | bsize, nb_rows, nb_cols, nb_feats = input_shape
82 | nb_pools = self.nb_pools if ( self.nb_pools is not None ) else ( nb_rows * nb_cols - 1 )
83 | return tuple( [ bsize, nb_rows, nb_cols, nb_pools ] )
84 |
85 | class BilinearUpSampling2D( Layer ) :
86 | '''Custom 2x bilinear upsampling layer
87 | Input:
88 | x = tensor4d, (n_samples, n_rows, n_cols, n_feats)
89 | Output:
90 | x2 = tensor4d, (n_samples, 2*n_rows, 2*n_cols, n_feats)
91 | '''
92 | def call( self, x, mask=None ) :
93 | bsize, nb_rows, nb_cols, nb_filts = K.int_shape(x)
94 | new_size = tf.constant( [ nb_rows * 2, nb_cols * 2 ], dtype = tf.int32 )
95 | return tf.image.resize_images( x, new_size, align_corners=True )
96 | def compute_output_shape( self, input_shape ) :
97 | bsize, nb_rows, nb_cols, nb_filts = input_shape
98 | return tuple( [ bsize, nb_rows * 2, nb_cols * 2, nb_filts ] )
99 |
100 | class ResizeBack( Layer ) :
101 | '''Custom bilinear resize layer
102 | Resize x's spatial dimension to that of r
103 |
104 | Input:
105 | x = tensor4d, (n_samples, n_rowsX, n_colsX, n_featsX )
106 | r = tensor4d, (n_samples, n_rowsR, n_colsR, n_featsR )
107 | Output:
108 | xn = tensor4d, (n_samples, n_rowsR, n_colsR, n_featsX )
109 | '''
110 | def call( self, x ) :
111 | t, r = x
112 | new_size = [ tf.shape(r)[1], tf.shape(r)[2] ]
113 | return tf.image.resize_images( t, new_size, align_corners=True )
114 | def compute_output_shape( self, input_shapes ) :
115 | tshape, rshape = input_shapes
116 | return ( tshape[0], ) + rshape[1:3] + ( tshape[-1], )
117 |
118 | class Preprocess( Layer ) :
119 | """Basic preprocess layer for BusterNet
120 | More precisely, it does the following two things
121 | 1) normalize input image size to (256,256) to speed up processing
122 | 2) subtract channel-wise means if necessary
123 | """
124 | def call( self, x, mask=None ) :
125 | # parse input image shape
126 | bsize, nb_rows, nb_cols, nb_colors = K.int_shape(x)
127 | if (nb_rows != 256) or (nb_cols !=256) :
128 | # resize image if different from (256,256)
129 | x256 = tf.image.resize_bilinear( x,
130 | [256, 256],
131 | align_corners=True,
132 | name='resize' )
133 | else :
134 | x256 = x
135 | # subtract channel means if necessary
136 | if K.dtype(x) == 'float32' :
137 | # input is not a 'uint8' image
138 | # assume it has already been normalized
139 | xout = x256
140 | else :
141 | # input is a 'uint8' image
142 | # subtract channel-wise means
143 | xout = preprocess_input( x256 )
144 | return xout
145 | def compute_output_shape( self, input_shape ) :
146 | return (input_shape[0], 256, 256, 3)
147 |
148 | def create_cmfd_similarity_branch( img_shape=(256,256,3),
149 | nb_pools=100,
150 | name='simiDet' ) :
151 | '''Create the similarity branch for copy-move forgery detection
152 | '''
153 | #---------------------------------------------------------
154 | # Input
155 | #---------------------------------------------------------
156 | img_input = Input( shape=img_shape, name=name+'_in' )
157 | #---------------------------------------------------------
158 | # VGG16 Conv Featex
159 | #---------------------------------------------------------
160 | bname = name + '_cnn'
161 | ## Block 1
162 | x1 = Conv2D(64, (3, 3), activation='relu', padding='same', name=bname+'_b1c1')(img_input)
163 | x1 = Conv2D(64, (3, 3), activation='relu', padding='same', name=bname+'_b1c2')(x1)
164 | x1 = MaxPooling2D((2, 2), strides=(2, 2), name=bname+'_b1p')(x1)
165 | # Block 2
166 | x2 = Conv2D(128, (3, 3), activation='relu', padding='same', name=bname+'_b2c1')(x1)
167 | x2 = Conv2D(128, (3, 3), activation='relu', padding='same', name=bname+'_b2c2')(x2)
168 | x2 = MaxPooling2D((2, 2), strides=(2, 2), name=bname+'_b2p')(x2)
169 | # Block 3
170 | x3 = Conv2D(256, (3, 3), activation='relu', padding='same', name=bname+'_b3c1')(x2)
171 | x3 = Conv2D(256, (3, 3), activation='relu', padding='same', name=bname+'_b3c2')(x3)
172 | x3 = Conv2D(256, (3, 3), activation='relu', padding='same', name=bname+'_b3c3')(x3)
173 | x3 = MaxPooling2D((2, 2), strides=(2, 2), name=bname+'_b3p')(x3)
174 | # Block 4
175 | x4 = Conv2D(512, (3, 3), activation='relu', padding='same', name=bname+'_b4c1')(x3)
176 | x4 = Conv2D(512, (3, 3), activation='relu', padding='same', name=bname+'_b4c2')(x4)
177 | x4 = Conv2D(512, (3, 3), activation='relu', padding='same', name=bname+'_b4c3')(x4)
178 | x4 = MaxPooling2D((2, 2), strides=(2, 2), name=bname+'_b4p')(x4)
179 | # Local Std-Norm Normalization (within each sample)
180 | xx = Activation(std_norm_along_chs, name=bname+'_sn')(x4)
181 | #---------------------------------------------------------
182 | # Self Correlation Pooling
183 | #---------------------------------------------------------
184 | bname = name + '_corr'
185 | ## Self Correlation
186 | xcorr = SelfCorrelationPercPooling(name=bname+'_corr')(xx)
187 | ## Global Batch Normalization (across samples)
188 | xn = BatchNormalization(name=bname+'_bn')(xcorr)
189 | #---------------------------------------------------------
190 | # Deconvolution Network
191 | #---------------------------------------------------------
192 | patch_list = [(1,1),(3,3),(5,5)]
193 | # MultiPatch Featex
194 | bname = name + '_dconv'
195 | f16 = BnInception( xn, 8, patch_list, name =bname+'_mpf')
196 | # Deconv x2
197 | f32 = BilinearUpSampling2D( name=bname+'_bx2')( f16 )
198 | dx32 = BnInception( f32, 6, patch_list, name=bname+'_dx2')
199 | # Deconv x4
200 | f64a = BilinearUpSampling2D( name=bname+'_bx4a')( f32 )
201 | f64b = BilinearUpSampling2D( name=bname+'_bx4b')( dx32 )
202 | f64 = Concatenate(axis=-1, name=name+'_dx4_m')([f64a, f64b])
203 | dx64 = BnInception( f64, 4, patch_list, name=bname+'_dx4')
204 | # Deconv x8
205 | f128a = BilinearUpSampling2D( name=bname+'_bx8a')( f64a )
206 | f128b = BilinearUpSampling2D( name=bname+'_bx8b')( dx64 )
207 | f128 = Concatenate(axis=-1, name=name+'_dx8_m')([f128a, f128b])
208 | dx128 = BnInception( f128, 2, patch_list, name=bname+'_dx8')
209 | # Deconv x16
210 | f256a = BilinearUpSampling2D( name=bname+'_bx16a')( f128a )
211 | f256b = BilinearUpSampling2D( name=bname+'_bx16b')( dx128 )
212 | f256 = Concatenate(axis=-1, name=name+'_dx16_m')([f256a,f256b])
213 | dx256 = BnInception( f256, 2, patch_list, name=bname+'_dx16')
214 | # Summarize
215 | fm256 = Concatenate(axis=-1,name=name+'_mfeat')([f256a,dx256])
216 | masks = BnInception( fm256, 2, [(5,5),(7,7),(11,11)], name=bname+'_dxF')
217 | #---------------------------------------------------------
218 | # Output for Auxiliary Task
219 | #---------------------------------------------------------
220 | pred_mask = Conv2D(1, (3,3), activation='sigmoid', name=name+'_pred_mask', padding='same')(masks)
221 | #---------------------------------------------------------
222 | # End to End
223 | #---------------------------------------------------------
224 | model = Model(inputs=img_input, outputs=pred_mask, name=name)
225 | return model
226 |
227 |
228 | def create_cmfd_manipulation_branch( img_shape=(256,256,3),
229 | name='maniDet' ) :
230 | '''Create the manipulation branch for copy-move forgery detection
231 | '''
232 | #---------------------------------------------------------
233 | # Input
234 | #---------------------------------------------------------
235 | img_input = Input( shape = img_shape, name = name+'_in' )
236 | #---------------------------------------------------------
237 | # VGG16 Conv Featex
238 | #---------------------------------------------------------
239 | bname = name + '_cnn'
240 | # Block 1
241 | x1 = Conv2D(64, (3, 3), activation='relu', padding='same', name=bname+'_b1c1')(img_input)
242 | x1 = Conv2D(64, (3, 3), activation='relu', padding='same', name=bname+'_b1c2')(x1)
243 | x1 = MaxPooling2D((2, 2), strides=(2, 2), name=bname+'_b1p')(x1)
244 | # Block 2
245 | x2 = Conv2D(128, (3, 3), activation='relu', padding='same', name=bname+'_b2c1')(x1)
246 | x2 = Conv2D(128, (3, 3), activation='relu', padding='same', name=bname+'_b2c2')(x2)
247 | x2 = MaxPooling2D((2, 2), strides=(2, 2), name=bname+'_b2p')(x2)
248 | # Block 3
249 | x3 = Conv2D(256, (3, 3), activation='relu', padding='same', name=bname+'_b3c1')(x2)
250 | x3 = Conv2D(256, (3, 3), activation='relu', padding='same', name=bname+'_b3c2')(x3)
251 | x3 = Conv2D(256, (3, 3), activation='relu', padding='same', name=bname+'_b3c3')(x3)
252 | x3 = MaxPooling2D((2, 2), strides=(2, 2), name=bname+'_b3p')(x3)
253 | # Block 4
254 | x4 = Conv2D(512, (3, 3), activation='relu', padding='same', name=bname+'_b4c1')(x3)
255 | x4 = Conv2D(512, (3, 3), activation='relu', padding='same', name=bname+'_b4c2')(x4)
256 | x4 = Conv2D(512, (3, 3), activation='relu', padding='same', name=bname+'_b4c3')(x4)
257 | x4 = MaxPooling2D((2, 2), strides=(2, 2), name=bname+'_b4p')(x4)
258 | #---------------------------------------------------------
259 | # Deconvolution Network
260 | #---------------------------------------------------------
261 | patch_list = [(1,1),(3,3),(5,5)]
262 | bname = name + '_dconv'
263 | # MultiPatch Featex
264 | f16 = BnInception( x4, 8, patch_list, name =bname+'_mpf')
265 | # Deconv x2
266 | f32 = BilinearUpSampling2D(name=bname+'_bx2')( f16 )
267 | dx32 = BnInception( f32, 6, patch_list, name=bname+'_dx2')
268 | # Deconv x4
269 | f64 = BilinearUpSampling2D(name=bname+'_bx4')( dx32 )
270 | dx64 = BnInception( f64, 4, patch_list, name=bname+'_dx4')
271 | # Deconv x8
272 | f128 = BilinearUpSampling2D(name=bname+'_bx8')( dx64 )
273 | dx128 = BnInception( f128, 2, patch_list, name=bname+'_dx8')
274 | # Deconv x16
275 | f256 = BilinearUpSampling2D(name=bname+'_bx16')( dx128 )
276 | dx256 = BnInception( f256, 2, [(5,5),(7,7),(11,11)], name=bname+'_dx16')
277 | #---------------------------------------------------------
278 | # Output for Auxiliary Task
279 | #---------------------------------------------------------
280 | pred_mask = Conv2D(1, (3,3), activation='sigmoid', name=bname+'_pred_mask', padding='same')(dx256)
281 | #---------------------------------------------------------
282 | # End to End
283 | #---------------------------------------------------------
284 | model = Model(inputs=img_input, outputs=pred_mask, name = bname)
285 | return model
286 |
287 | def create_BusterNet_testing_model( weight_file=None ) :
288 | '''create a busterNet testing model with pretrained weights
289 | '''
290 | # 1. create branch model
291 | simi_branch = create_cmfd_similarity_branch()
292 | mani_branch = create_cmfd_manipulation_branch()
293 | # 2. crop off the last auxiliary task layer
294 | SimiDet = Model( inputs=simi_branch.inputs,
295 | outputs=simi_branch.layers[-2].output,
296 | name='simiFeatex' )
297 | ManiDet = Model( inputs=mani_branch.inputs,
298 | outputs=mani_branch.layers[-2].output,
299 | name='maniFeatex' )
300 | # 3. define the two-branch BusterNet model
301 | # 3.a define wrapper inputs
302 | img_raw = Input( shape=(None,None,3), name='image_in')
303 | img_in = Preprocess( name='preprocess')( img_raw )
304 | # 3.b define BusterNet Core
305 | simi_feat = SimiDet( img_in )
306 | mani_feat = ManiDet( img_in )
307 | merged_feat = Concatenate(axis=-1, name='merge')([simi_feat, mani_feat])
308 | f = BnInception( merged_feat, 3, name='fusion' )
309 | mask_out = Conv2D( 3, (3,3), padding='same', activation='softmax', name='pred_mask')(f)
310 | # 3.c define wrapper output
311 | mask_out = ResizeBack(name='restore')([mask_out, img_raw] )
312 | # 4. create BusterNet model end-to-end
313 | model = Model( inputs = img_raw, outputs = mask_out, name = 'busterNet')
314 | if weight_file is not None :
315 | try :
316 | model.load_weights( weight_file )
317 | print("INFO: successfully load pretrained weights from {}".format( weight_file ) )
318 | except Exception as e :
319 | print("INFO: fail to load pretrained weights from {} for reason: {}".format( weight_file, e ))
320 | return model
--------------------------------------------------------------------------------
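Note: SelfCorrelationPercPooling is the key custom layer in the similarity branch above. Below is a minimal NumPy sketch of the same computation on a single (rows, cols, ch) feature map, for illustration only; the actual layer operates on batched symbolic tensors inside the Keras graph, and the function name here is not part of the project.

import numpy as np

def self_corr_perc_pool(feat, nb_pools=100):
    """feat: (rows, cols, ch) feature map -> (rows, cols, nb_pools) pooled correlations."""
    rows, cols, ch = feat.shape
    nb_maps = rows * cols
    f = feat.reshape(nb_maps, ch)
    # correlate every spatial position with every other position, scaled by ch
    corr = f @ f.T / ch                                  # (nb_maps, nb_maps)
    # sort each position's correlation scores in descending order
    corr_sorted = -np.sort(-corr, axis=-1)
    # keep nb_pools evenly spaced percentile ranks, starting at rank 1
    ranks = np.round(np.linspace(1, nb_maps - 1, nb_pools)).astype(int)
    pooled = corr_sorted[:, ranks]
    return pooled.reshape(rows, cols, nb_pools)

Because only sorted percentile values are kept, the pooled feature roughly describes how strongly a position matches somewhere else in the image without encoding where that match sits, which is what the deconvolution stages then turn into a localization mask.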
/README.md:
--------------------------------------------------------------------------------
1 | # BachelorGraduationProject
2 | my Bachelor Graduation Project
3 |
4 | This is a Python-based program that uses the model designed in https://github.com/isi-vista/BusterNet.
5 | The software supports image cropping, pasting, and saving.
6 | It can also localize copy-move tampering in a single image or in a whole folder of images.
7 | In short, it is a piece of software written in Python with a PyQt5 UI: it can open an image and check it for copy-move tampering, it can batch-process an entire folder of images, and the user can also create a copy-move forgery by hand and then run detection on it.
8 | Bachelor graduation project.
9 |
10 |
11 |
12 | Here is a brief description:
13 |
14 | Image-editing software has made tampered images commonplace and images in general less credible.
15 | Copy-move tampering, in which a region of an image is copied and pasted elsewhere within the same image, is one of the most common kinds of digital forgery.
16 | A digital image copy-move tampering and localization system therefore helps people, especially those who need to assess images professionally, distinguish and analyze forged images.
17 | This project builds such a system in Python, using the BusterNet copy-move detection network, PyQt5 as the UI framework, and the OpenCV and PIL image-processing libraries.
18 | Tests of both the model and the software show that the system localizes copy-move forgeries with a certain degree of accuracy and is therefore of practical use.
19 |
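For reference, the detection step implemented in model.py boils down to the sketch below. The image path and the location of the pretrained weight file are placeholders here (model.py reads them from hard-coded local paths):

```python
import cv2
import numpy as np
from BusterNetCore import create_BusterNet_testing_model

# build the two-branch BusterNet model and load the pretrained weights
model = create_BusterNet_testing_model('logs/pretrained_busterNet.hd5')

# read a suspect image and run one forward pass
X = cv2.imread('suspect.png')
Z = model.predict(np.uint8(np.expand_dims(X, axis=0)), verbose=0)

# rescale the predicted mask to 0-255 and binarize it, as model.py does
Z = np.uint8(Z[0] * 255.0)
gray = cv2.cvtColor(Z, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(gray, 90, 255, cv2.THRESH_BINARY)
cv2.imwrite('mask.png', mask)
```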
20 | Selected results:
21 | Figure 1: loading and displaying an image
22 |
23 | 
24 |
25 | Figure 2: cropping a region
26 |
27 | 
28 |
29 | Figure 3: pasting the region
30 |
31 | 
32 |
33 | Figure 4: single-image localization and its result
34 |
35 | 
36 |
37 | Figure 5: results of processing an imported folder
38 |
39 | 
40 |
41 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/__pycache__/BusterNetCore.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luckyredpanda/Image-Copy-move-Tampering-Localization-System/5b3e76047a6f1fdf021ae1715c2befa719d9bae4/__pycache__/BusterNetCore.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/UIloadingpicture.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luckyredpanda/Image-Copy-move-Tampering-Localization-System/5b3e76047a6f1fdf021ae1715c2befa719d9bae4/__pycache__/UIloadingpicture.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/UIlogin.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luckyredpanda/Image-Copy-move-Tampering-Localization-System/5b3e76047a6f1fdf021ae1715c2befa719d9bae4/__pycache__/UIlogin.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/cropimagecir.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luckyredpanda/Image-Copy-move-Tampering-Localization-System/5b3e76047a6f1fdf021ae1715c2befa719d9bae4/__pycache__/cropimagecir.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/main.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luckyredpanda/Image-Copy-move-Tampering-Localization-System/5b3e76047a6f1fdf021ae1715c2befa719d9bae4/__pycache__/main.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/main_ui.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luckyredpanda/Image-Copy-move-Tampering-Localization-System/5b3e76047a6f1fdf021ae1715c2befa719d9bae4/__pycache__/main_ui.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/model.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luckyredpanda/Image-Copy-move-Tampering-Localization-System/5b3e76047a6f1fdf021ae1715c2befa719d9bae4/__pycache__/model.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/pasteimage.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luckyredpanda/Image-Copy-move-Tampering-Localization-System/5b3e76047a6f1fdf021ae1715c2befa719d9bae4/__pycache__/pasteimage.cpython-37.pyc
--------------------------------------------------------------------------------
/cropimagecir.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from main import *
3 |
4 | global img
5 | global point1, point2
6 |
7 | lsPointsChoose = []
8 | tpPointsChoose = []
9 | lx = []
10 | ly = []
11 |
12 | pointsCount = 0
13 | count = 0
14 | pointsMax = 5
15 |
16 | #----------------------- mouse operation handlers ----------------------------
17 | #------------------------------------------------------------------------------
18 | lsPointsChoose = []
19 | tpPointsChoose = []
20 |
21 | pointsCount = 0
22 | count = 0
23 | pointsMax = 20
24 |
25 | def on_mouse(event, x, y, flags, param):
26 | global img, point1, point2,count,pointsMax
27 | global lsPointsChoose, tpPointsChoose # store the selected points
28 | global pointsCount # count of mouse clicks
29 | global img2, ROI_bymouse_flag
30 | global a,b
31 | img2 = img.copy() # redraw on a fresh copy of the original each time so strokes do not accumulate
32 |
33 |
34 |
35 |
36 | if event == cv2.EVENT_LBUTTONDOWN: # left button pressed
37 |
38 | pointsCount=pointsCount+1
39 |
40 | # reset the point counter one click late so that the drawn region is preserved
41 | if(pointsCount==pointsMax+1):
42 | pointsCount = 0
43 | tpPointsChoose=[]
44 | print('pointsCount:', pointsCount)
45 | point1 = (x, y)
46 | print(x, y)
47 | lx.append(x)
48 | ly.append(y)
49 | print (lx)
50 |
51 | if (pointsCount == 1):
52 | a=x;b=y;
53 | # draw the clicked point
54 | cv2.circle(img2, point1, 10, (0, 255, 0), 5)
55 |
56 | # save the selected point into the lists
57 | lsPointsChoose.append([x, y]) # converted to an ndarray later to extract the polygon ROI
58 | tpPointsChoose.append((x, y)) # used for drawing the points
59 | #----------------------------------------------------------------------
60 | # connect the selected points with straight lines
61 | print(len(tpPointsChoose))
62 | for i in range(len(tpPointsChoose)-1):
63 | cv2.line(img2, tpPointsChoose[i], tpPointsChoose[i+1], (0, 0, 255), 2)
64 | #----------------------------------------------------------------------
65 | #---------- once pointsMax clicks are reached, the ROI can be extracted and drawn ----------------
66 | if (pointsCount !=pointsMax):
67 | if(pointsCount > 2):
68 | if(point1==(a,b) or point1==(a+1,b) or point1==(a-1,b)or point1==(a,b+1)or point1==(a,b-1)or point1==(a-1,b-1)or point1==(a+1,b+1)):
69 | #----------- extract the region of interest -----------
70 | ROI_byMouse()
71 | ROI_bymouse_flag = 1
72 | lsPointsChoose = []
73 | sys.exit(0)
74 | else:
75 | ROI_byMouse()
76 | ROI_bymouse_flag = 1
77 | lsPointsChoose = []
78 | sys.exit(0)
79 | #--------------------------------------------------------
80 | cv2.imshow('src', img2)
81 | #------------------------- right click clears the drawn track (unfinished) -----------------------------
82 | if event == cv2.EVENT_RBUTTONDOWN: # right button pressed
83 | print("right-mouse")
84 | pointsCount = 0
85 |
86 |
87 | tpPointsChoose = []
88 | lsPointsChoose = []
89 |
90 |
91 | #--------------------------------------------------------------
92 | def ROI_byMouse():
93 | global src, ROI, ROI_flag, mask2
94 | mask = np.zeros(img.shape, np.uint8)
95 | pts = np.array([lsPointsChoose], np.int32)
96 | # pts.shape=(4,2)
97 | pts = pts.reshape((-1, 1, 2)) # -1 lets the remaining dimension be computed automatically
98 | # after the reshape, pts.shape == (n_points, 1, 2)
99 | #-------------- draw the polygon outline ---------------------
100 | mask = cv2.polylines(mask, [pts], True, (0, 255, 255))
101 | #------------- fill the polygon ---------------------
102 | mask2 = cv2.fillPoly(mask, [pts], (255,255,255))
103 | #cv2.imshow('mask', mask2)
104 | dst = cv2.bitwise_and(img, mask)
105 |
106 | cv2.imwrite("image.png", dst)
107 | croppedmask = mask[ min(ly):max(ly),min(lx):max(lx)]
108 | cv2.imwrite("masktest.png",croppedmask)
109 |
110 | file_name = "image.png"
111 | src = cv2.imread(file_name, 1)
112 | tmp = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
113 | _,alpha = cv2.threshold(tmp,0,255,cv2.THRESH_BINARY)
114 | b, g, r = cv2.split(src)
115 | rgba = [b,g,r, alpha]
116 | dst = cv2.merge(rgba,4)
117 | cropped = dst[ min(ly):max(ly),min(lx):max(lx)]
118 | cv2.imshow("test.png", cropped)
119 | cv2.imwrite("test.png", cropped)
120 | os.remove("image.png")
121 |
122 | def main():
123 | global img,img2,ROI
124 | with open("/Users/chenweihao/PycharmProjects/Bishe/fileaddress.txt", "r") as f:
125 | data = f.read()
126 | img = cv2.imread(data)
127 |
128 | #---------------------------------------------------------
129 | #-- preprocess the image: set its size
130 | height, width = img.shape[:2]
131 | size = (int(width*1), int(height*1))
132 | img = cv2.resize(img, size, interpolation=cv2.INTER_AREA)
133 | #------------------------------------------------------------
134 | ROI = img.copy()
135 | cv2.namedWindow('src')
136 | cv2.setMouseCallback('src', on_mouse)
137 | cv2.imshow('src', img)
138 | cv2.waitKey(0)
139 | cv2.destroyAllWindows()
140 |
141 | if __name__ == '__main__':
142 |
143 | main()
144 | sys.exit(0)
--------------------------------------------------------------------------------
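The ROI extraction in ROI_byMouse above amounts to: rasterize the clicked polygon into a filled mask, AND the mask with the image, attach the mask as an alpha channel, and crop everything to the polygon's bounding box. A self-contained sketch of those same steps (the function name and arguments are illustrative, not part of the project):

import cv2
import numpy as np

def crop_polygon(img, points):
    """img: BGR image; points: list of (x, y) vertices. Returns a BGRA crop and the mask crop."""
    pts = np.array(points, np.int32).reshape((-1, 1, 2))
    mask = np.zeros(img.shape, np.uint8)
    cv2.fillPoly(mask, [pts], (255, 255, 255))         # filled polygon mask
    cut = cv2.bitwise_and(img, mask)                   # keep only pixels inside the polygon
    # build an alpha channel so everything outside the polygon becomes transparent
    gray = cv2.cvtColor(cut, cv2.COLOR_BGR2GRAY)
    _, alpha = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)
    b, g, r = cv2.split(cut)
    bgra = cv2.merge([b, g, r, alpha])
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    crop = bgra[min(ys):max(ys), min(xs):max(xs)]
    mask_crop = mask[min(ys):max(ys), min(xs):max(xs)]
    return crop, mask_crop

cropimagecir.py writes the two crops to test.png and masktest.png; pasteimage.py later uses masktest.png as the paste mask so that only the polygon interior is pasted.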
/fileaddress.txt:
--------------------------------------------------------------------------------
1 | /Users/chenweihao/Downloads/test
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import sys,os
2 | import cv2
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | from PyQt5.QtWidgets import *
6 | from main_ui import *
7 |
8 | class MyMainWindow(QMainWindow, Ui_MainWindow):
9 |
10 | def __init__(self, parent=None):
11 | super(MyMainWindow, self).__init__(parent)
12 | self.setupUi(self)
13 | self.pushButton.clicked.connect(self.show_img)
14 | self.pushButton_2.clicked.connect(self.crop_image)
15 | self.pushButton_3.clicked.connect(self.paste_image)
16 | self.pushButton_8.clicked.connect(self.save_image)
17 | self.pushButton_9.clicked.connect(self.detect_image)
18 | self.pushButton_10.clicked.connect(self.detectmore_image)
19 |
20 | def text_create(self,msg):
21 | desktop_path = "/Users/chenweihao/PycharmProjects/Bishe/" # directory where the new txt file is stored
22 | full_path = desktop_path +'fileaddress.txt' # a .doc document could also be created here instead
23 | file = open(full_path, 'w')
24 | file.write(msg)
25 |
26 | #show image
27 | def show_img(self):
28 | global pic_path
29 | pic_path, _ = QFileDialog.getOpenFileName(self, '显示图片', '/Users/', 'Image files(*.jpg *.gif *.png *.tif)')
30 | self.text_create(pic_path)
31 | if pic_path:
32 | image2 = cv2.imread(pic_path)
33 | global show
34 | show = cv2.resize(image2, (800, 600))
35 | show2 = cv2.cvtColor(show, cv2.COLOR_BGR2RGB) # convert BGR to RGB so the colors display correctly
36 | showImage = QtGui.QImage(show2.data, show2.shape[1], show2.shape[0],
37 | QtGui.QImage.Format_RGB888) # wrap the image data in a QImage
38 | self.label.setPixmap(QtGui.QPixmap.fromImage(showImage)) # show the QImage in the display label
39 |
40 | #crop
41 | def crop_image(self):
42 | result = os.path.exists('/Users/chenweihao/PycharmProjects/Bishe/fileaddress.txt')
43 | if result == True:
44 | os.system('python cropimagecir.py')
45 | QMessageBox.information(self,"成功了!","您已经抠图成功!",QMessageBox.Yes | QMessageBox.No)
46 | else:
47 | QMessageBox.information(self,"错误","请选择一张图片!",QMessageBox.Yes | QMessageBox.No)
48 |
49 | #paste
50 | def paste_image(self):
51 | result = os.path.exists('/Users/chenweihao/PycharmProjects/Bishe/fileaddress.txt')
52 | if result == True:
53 | os.system('python pasteimage.py')
54 | QMessageBox.information(self,"成功了!","您已经贴图成功!如果不满意可以重新操作。",QMessageBox.Yes | QMessageBox.No)
55 | else:
56 | QMessageBox.information(self,"错误","请选择一张图片!",QMessageBox.Yes | QMessageBox.No)
57 |
58 | #save
59 | def save_image(self):
60 | result = os.path.exists('/Users/chenweihao/PycharmProjects/Bishe/fileaddress.txt')
61 | if result == True:
62 | pic_path = "/Users/chenweihao/PycharmProjects/Bishe/rectangle_result.png"
63 | self.text_create("/Users/chenweihao/PycharmProjects/Bishe/rectangle_result.png")
64 | QMessageBox.information(self,"成功了!","图像已保存!",QMessageBox.Yes | QMessageBox.No)
65 | image2 = cv2.imread(pic_path)
66 | show = cv2.resize(image2, (800, 600))
67 | show2 = cv2.cvtColor(show, cv2.COLOR_BGR2RGB) # convert BGR to RGB so the colors display correctly
68 | showImage = QtGui.QImage(show2.data, show2.shape[1], show2.shape[0],
69 | QtGui.QImage.Format_RGB888)
70 | self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
71 | else:
72 | QMessageBox.information(self,"错误","请选择一张图片!",QMessageBox.Yes | QMessageBox.No)
73 |
74 | #detect
75 | def detect_image(self):
76 | QMessageBox.information(self,"检测中","检测中,请稍等!大约需要20秒。",QMessageBox.Yes | QMessageBox.No)
77 | os.system('python model.py')
78 | #detect more than one picture
79 | def detectmore_image(self):
80 | global directory
81 | directory = QFileDialog.getExistingDirectory(self,
82 | "选取文件夹",
83 | "./")
84 | self.text_create(directory)
85 | QMessageBox.information(self,"检测中","检测中,请稍等!先喝杯茶吧。",QMessageBox.Yes | QMessageBox.No)
86 | os.system('python morepictures.py')
87 | QMessageBox.information(self,"完成了!","恭喜你!完成啦!",QMessageBox.Yes | QMessageBox.No)
88 |
89 |
90 | if __name__ == '__main__':
91 | app = QApplication(sys.argv)
92 | myWin = MyMainWindow()
93 | myWin.show()
94 | sys.exit(app.exec_())
--------------------------------------------------------------------------------
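A note on how the pieces fit together: main.py never imports the worker scripts. It writes the user's selection into fileaddress.txt and then launches cropimagecir.py, pasteimage.py, model.py or morepictures.py in a separate interpreter via os.system; each worker reads the path back from that file. Reduced to its essentials (the sample path is a placeholder):

import os

pic_path = '/Users/chenweihao/Downloads/test/sample.png'  # placeholder for the user's selection

# GUI side (main.py): persist the selection for the worker scripts
with open('/Users/chenweihao/PycharmProjects/Bishe/fileaddress.txt', 'w') as f:
    f.write(pic_path)
os.system('python model.py')   # the worker runs in its own Python process

# worker side (cropimagecir.py, model.py, morepictures.py): read the selection back
with open('/Users/chenweihao/PycharmProjects/Bishe/fileaddress.txt', 'r') as f:
    data = f.read()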
/main_ui.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'main_ui.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.9.2
6 | #
7 | # WARNING! All changes made in this file will be lost!
8 |
9 | from PyQt5 import QtCore, QtGui, QtWidgets
10 |
11 | class Ui_MainWindow(object):
12 | def setupUi(self, MainWindow):
13 | MainWindow.setObjectName("MainWindow")
14 | MainWindow.resize(1058, 739)
15 | self.centralwidget = QtWidgets.QWidget(MainWindow)
16 | self.centralwidget.setObjectName("centralwidget")
17 | self.pushButton_9 = QtWidgets.QPushButton(self.centralwidget)
18 | self.pushButton_9.setGeometry(QtCore.QRect(400, 640, 101, 41))
19 | self.pushButton_9.setIconSize(QtCore.QSize(50, 50))
20 | self.pushButton_9.setObjectName("pushButton_9")
21 | self.label = QtWidgets.QLabel(self.centralwidget)
22 | self.label.setGeometry(QtCore.QRect(160, 10, 841, 611))
23 | self.label.setText("")
24 | self.label.setTextFormat(QtCore.Qt.AutoText)
25 | self.label.setObjectName("label")
26 | self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
27 | self.layoutWidget.setGeometry(QtCore.QRect(20, 40, 116, 491))
28 | self.layoutWidget.setObjectName("layoutWidget")
29 | self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
30 | self.verticalLayout.setContentsMargins(0, 0, 0, 0)
31 | self.verticalLayout.setObjectName("verticalLayout")
32 | self.pushButton = QtWidgets.QPushButton(self.layoutWidget)
33 | self.pushButton.setObjectName("pushButton")
34 | self.verticalLayout.addWidget(self.pushButton)
35 | self.pushButton_2 = QtWidgets.QPushButton(self.layoutWidget)
36 | self.pushButton_2.setObjectName("pushButton_2")
37 | self.verticalLayout.addWidget(self.pushButton_2)
38 | self.pushButton_3 = QtWidgets.QPushButton(self.layoutWidget)
39 | self.pushButton_3.setObjectName("pushButton_3")
40 | self.verticalLayout.addWidget(self.pushButton_3)
41 | self.pushButton_8 = QtWidgets.QPushButton(self.layoutWidget)
42 | self.pushButton_8.setObjectName("pushButton_8")
43 | self.verticalLayout.addWidget(self.pushButton_8)
44 | self.pushButton_10 = QtWidgets.QPushButton(self.centralwidget)
45 | self.pushButton_10.setGeometry(QtCore.QRect(600, 640, 101, 41))
46 | self.pushButton_10.setIconSize(QtCore.QSize(50, 50))
47 | self.pushButton_10.setObjectName("pushButton_10")
48 | MainWindow.setCentralWidget(self.centralwidget)
49 | self.menubar = QtWidgets.QMenuBar(MainWindow)
50 | self.menubar.setGeometry(QtCore.QRect(0, 0, 1058, 22))
51 | self.menubar.setObjectName("menubar")
52 | MainWindow.setMenuBar(self.menubar)
53 | self.statusbar = QtWidgets.QStatusBar(MainWindow)
54 | self.statusbar.setObjectName("statusbar")
55 | MainWindow.setStatusBar(self.statusbar)
56 |
57 | self.retranslateUi(MainWindow)
58 | QtCore.QMetaObject.connectSlotsByName(MainWindow)
59 |
60 | def retranslateUi(self, MainWindow):
61 | _translate = QtCore.QCoreApplication.translate
62 | MainWindow.setWindowTitle(_translate("MainWindow", "数字图像复制粘贴篡改与定位系统"))
63 | self.pushButton_9.setText(_translate("MainWindow", "单幅定位"))
64 | self.pushButton.setText(_translate("MainWindow", "打开图片"))
65 | self.pushButton_2.setText(_translate("MainWindow", "自由抠图"))
66 | self.pushButton_3.setText(_translate("MainWindow", "图像粘贴"))
67 | self.pushButton_8.setText(_translate("MainWindow", "结果保存"))
68 | self.pushButton_10.setText(_translate("MainWindow", "测试集定位"))
69 |
70 |
--------------------------------------------------------------------------------
/main_ui.ui:
--------------------------------------------------------------------------------
(The Qt Designer XML markup for this .ui file was not captured in this export. From the surviving values and the generated main_ui.py, the form is a 1058x739 main window containing a display QLabel at (160, 10, 841, 611); a vertical button group at (20, 40, 116, 491) with the buttons 打开图片 (open image), 自由抠图 (freehand crop), 图像粘贴 (paste image) and 结果保存 (save result); and two standalone buttons, 单幅定位 (single-image localization) at (400, 640, 101, 41) and 测试集定位 (test-set localization) at (600, 640, 101, 41).)
--------------------------------------------------------------------------------
/masktest.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luckyredpanda/Image-Copy-move-Tampering-Localization-System/5b3e76047a6f1fdf021ae1715c2befa719d9bae4/masktest.png
--------------------------------------------------------------------------------
/model.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | #from __future__ import print_function
4 | import os
5 | import sys
6 | import cv2
7 | import pandas
8 | import tensorflow as tf
9 | import numpy as np
10 | import warnings
11 | from matplotlib import pyplot
12 | warnings.filterwarnings("ignore")
13 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
14 | import matplotlib
15 |
16 |
17 |
18 | def visualize_one_sample( X, Y, Z, figsize=(12,4)):
19 | x = np.array(X).astype('uint8')
20 | y = np.array(Y).astype('uint8')
21 | z = np.array(Z)
22 | pyplot.figure(figsize=figsize)
23 | pyplot.subplot(131)
24 | pyplot.imshow( x )
25 | pyplot.title('test image')
26 | pyplot.subplot(132)
27 | pyplot.imshow( y )
28 | pyplot.title('ground truth')
29 | pyplot.subplot(133)
30 | pyplot.imshow( z )
31 | pyplot.title('BusterNet predicted')
32 | return
33 |
34 | def text_createX(msg):
35 | desktop_path = "/Users/chenweihao/PycharmProjects/Bishe/" # directory where the new txt file is stored
36 | full_path = desktop_path +'X.txt' # a .doc document could also be created here instead
37 | file = open(full_path, 'w')
38 | file.write(msg)
39 |
40 | from BusterNetCore import create_BusterNet_testing_model
41 |
42 | with open("/Users/chenweihao/PycharmProjects/Bishe/fileaddress.txt", "r") as f:
43 | data = f.read()
44 |
45 | model_dir = '/Users/chenweihao/Downloads/CM/logs'
46 | sys.path.insert( 0, model_dir )
47 | busterNetModel = create_BusterNet_testing_model( os.path.join( model_dir, 'pretrained_busterNet.hd5' ) )
48 |
49 | filename_test = data # to test another image, change the file name
50 | X = cv2.imread(data)
51 | X = cv2.resize(X, (300, 300))
52 | cv2.imwrite('114.png', np.uint8(X))
53 | text_createX('/Users/chenweihao/PycharmProjects/Bishe/114.png')
54 | pyplot.subplot(1,2,1)
55 | pyplot.imshow(X)
56 | X = cv2.resize(X, (300, 300))
57 | Z = busterNetModel.predict(np.uint8(np.expand_dims(X, axis=0)), verbose = 0)
58 | Z = np.uint8(Z[0] * 255.0)
59 | Z1 = cv2.cvtColor(Z, cv2.COLOR_BGR2GRAY)
60 | ret, result_img = cv2.threshold(Z1, 90, 255, cv2.THRESH_BINARY)
61 | pyplot.subplot(1,2,2)
62 | pyplot.imshow(result_img)
63 | cv2.imwrite('114514.png', np.uint8(result_img))
64 | os.system("python modelUI.py")
65 |
66 |
--------------------------------------------------------------------------------
/modelUI.py:
--------------------------------------------------------------------------------
1 | import sys,os,cv2
2 | from PyQt5 import QtWidgets, QtCore, QtGui
3 | from PyQt5.QtGui import *
4 | from PyQt5.QtWidgets import *
5 | from PyQt5.QtCore import *
6 |
7 | global a,b
8 |
9 | with open("/Users/chenweihao/PycharmProjects/Bishe/X.txt", "r") as f:
10 | datax = f.read()
11 |
12 | class modelUI (QMainWindow):
13 |
14 | def __init__(self):
15 | super ().__init__()
16 | self.initUI ()
17 |
18 | def initUI(self):
19 |
20 |
21 | pix1 = QPixmap(datax)
22 | lb1 = QLabel(self)
23 | lb1.setGeometry(0,0,300,300)
24 | lb1.setStyleSheet("border: 1px solid black")
25 | lb1.setPixmap(pix1)
26 |
27 | pix2 = QPixmap('114514.png')
28 |
29 | lb1 = QLabel(self)
30 | lb1.setGeometry(0,300,300,300)
31 | lb1.setStyleSheet("border: 1px solid black")
32 | lb1.setPixmap(pix2)
33 |
34 |
35 | # set the window position and size
36 | self.setGeometry(300, 600, 300, 600)
37 | # set the window title
38 | self.setWindowTitle('最终结果')
39 |
40 | self.show()
41 |
42 |
43 |
44 | if __name__ == '__main__':
45 | app = QApplication(sys.argv)
46 | md = modelUI ()
47 | os.remove('/Users/chenweihao/PycharmProjects/Bishe/X.txt')
48 | os.remove('/Users/chenweihao/PycharmProjects/Bishe/fileaddress.txt')
49 | os.remove('/Users/chenweihao/PycharmProjects/Bishe/114514.png')
50 | os.remove('/Users/chenweihao/PycharmProjects/Bishe/114.png')
51 | sys.exit (app.exec_())
52 |
53 |
54 |
--------------------------------------------------------------------------------
/morepictures.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import cv2
4 | import pandas
5 | import tensorflow as tf
6 | import numpy as np
7 | import warnings
8 | from matplotlib import pyplot
9 | warnings.filterwarnings("ignore")
10 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
11 | import matplotlib
12 |
13 | def visualize_one_sample( X, Y, Z, figsize=(12,4)):
14 | x = np.array(X).astype('uint8')
15 | y = np.array(Y).astype('uint8')
16 | z = np.array(Z)
17 | pyplot.figure(figsize=figsize)
18 | pyplot.subplot(131)
19 | pyplot.imshow( x )
20 | pyplot.title('test image')
21 | pyplot.subplot(132)
22 | pyplot.imshow( y )
23 | pyplot.title('ground truth')
24 | pyplot.subplot(133)
25 | pyplot.imshow( z )
26 | pyplot.title('BusterNet predicted')
27 | return
28 |
29 | from BusterNetCore import create_BusterNet_testing_model
30 |
31 | model_dir = '/Users/chenweihao/Downloads/CM/logs'
32 | sys.path.insert( 0, model_dir )
33 | busterNetModel = create_BusterNet_testing_model( os.path.join( model_dir, 'pretrained_busterNet.hd5' ) )
34 |
35 | with open("/Users/chenweihao/PycharmProjects/Bishe/fileaddress.txt", "r") as f:
36 | data = f.read()
37 |
38 | for filename in os.listdir(data):
39 | if filename.endswith('jpg') or filename.endswith('png') or filename.endswith('tif'):
40 | print (data+'/'+filename) # to test another image, change the file name
41 | X = cv2.imread(data+'/'+filename)
42 | pyplot.subplot(1,2,1)
43 | pyplot.imshow(X)
44 | Z = busterNetModel.predict( np.uint8(np.expand_dims(X, axis=0)), verbose = 0)
45 | Z = np.uint8(Z[0] * 255.0)
46 | Z1 = cv2.cvtColor(Z, cv2.COLOR_BGR2GRAY)
47 | ret, result_img = cv2.threshold(Z1, 90, 255, cv2.THRESH_BINARY)
48 | pyplot.subplot(1,2,2)
49 | pyplot.imshow(result_img)
50 | filename = filename [:-4]
51 | cv2.imwrite(data+'/'+filename + 'mask.png', np.uint8(result_img))
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
--------------------------------------------------------------------------------
/pasteimage.py:
--------------------------------------------------------------------------------
1 | from PyQt5.QtWidgets import *
2 | from PyQt5.QtCore import QCoreApplication, QRect
3 | from PyQt5.QtWidgets import QFileDialog
4 | import PIL
5 | from PIL import Image, ImageDraw, ImageFilter
6 | from PyQt5 import QtCore, QtGui, QtWidgets
7 | from PyQt5.QtWidgets import QWidget, QApplication, QLabel
8 | from PyQt5.QtCore import QRect, Qt
9 | from PyQt5.QtGui import *
10 | import sys,os
11 |
12 | global a,b
13 |
14 | with open("/Users/chenweihao/PycharmProjects/Bishe/fileaddress.txt", "r") as f:
15 | data = f.read()
16 | im1 = Image.open(data)
17 | a = im1.size[0]
18 | b = im1.size[1]
19 |
20 |
21 | class myLabel2(QMainWindow):
22 |
23 | x0 = 0
24 | y0 = 0
25 | flag = False
26 | global x, y
27 |
28 | def __init__(self, parent=None):
29 | super(myLabel2, self).__init__(parent)
30 |
31 | self.setWindowTitle('贴图界面')
32 | window_palette = QtGui.QPalette()
33 | window_palette.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap(data)))
34 | self.setPalette(window_palette)
35 | self.resize(a,b)
36 |
37 |
38 | def mousePressEvent(self, event):
39 | self.flag = True
40 | self.x0 = event.x()
41 | self.y0 = event.y()
42 | global x
43 | global y
44 | x = self.x0
45 | y = self.y0
46 | print(x)
47 | print(y)
48 | im1 = Image.open(data)
49 | im2 = Image.open('test.png')
50 | c = im2.size[0]
51 | d = im2.size[1]
52 | mask_im =Image.open('masktest.png').resize(im2.size).convert('L')
53 | back_im = im1.copy()
54 | x = int(x-(c/2))
55 | y = int(y-(d/2))
56 | print(x)
57 | print(y)
58 | back_im.paste(im2, (x, y), mask_im)
59 | back_im.save('rectangle_result.png', quality=95)
60 | back_im.show('rectangle_result.png')
61 | self.pic = QtGui.QPixmap("rectangle_result.png")
62 |
63 | def mouseReleaseEvent(self, event):
64 | self.flag = False
65 |
66 |
67 | qapp = QApplication(sys.argv)
68 | app = myLabel2()
69 |
70 | app.show()
71 | sys.exit(qapp.exec_())
--------------------------------------------------------------------------------
/rectangle_result.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luckyredpanda/Image-Copy-move-Tampering-Localization-System/5b3e76047a6f1fdf021ae1715c2befa719d9bae4/rectangle_result.png
--------------------------------------------------------------------------------
/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luckyredpanda/Image-Copy-move-Tampering-Localization-System/5b3e76047a6f1fdf021ae1715c2befa719d9bae4/test.png
--------------------------------------------------------------------------------