
143 |
144 |
145 |
146 |
--------------------------------------------------------------------------------
/first_image/first_image.py:
--------------------------------------------------------------------------------
1 | # _*_ coding: utf-8 _*_
2 | __author__ = 'LelandYan'
3 | __date__ = '2019/5/16 19:32'
4 | import numpy as np
5 | import cv2
6 | import matplotlib.pyplot as plt
7 | import copy
8 | from pylab import mpl
9 | import skimage
10 | # Prevent garbled Chinese characters in matplotlib labels
11 | mpl.rcParams['font.sans-serif'] = ['SimHei']
12 |
13 |
14 | class processing_image:
15 | def __init__(self, filename="./raw_data/1.jpg", output="./out_data"):
16 | self.filename = filename
17 | self.output = output
18 |
19 | def op_gray_to_four_type(self, kernel=(9, 9), erode_iter=5, dilate_iter=5):
20 |
21 | img = cv2.imread(self.filename)
22 | # gray
23 | img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
24 | # erode dilate
25 | closed = cv2.erode(img, None, iterations=erode_iter)
26 | img = cv2.dilate(closed, None, iterations=dilate_iter)
27 |
28 | kernel = np.ones(kernel, np.uint8)
29 | # open operation
30 | img_open = cv2.morphologyEx(img, op=cv2.MORPH_OPEN, kernel=kernel)
31 | # close operation
32 | img_close = cv2.morphologyEx(img, op=cv2.MORPH_CLOSE, kernel=kernel)
33 | # gradient operation
34 | img_grad = cv2.morphologyEx(img, op=cv2.MORPH_GRADIENT, kernel=kernel)
35 | # tophat operation
36 | img_tophat = cv2.morphologyEx(img, op=cv2.MORPH_TOPHAT, kernel=kernel)
37 | # blackhat operation
38 | img_blackhat = cv2.morphologyEx(img, op=cv2.MORPH_BLACKHAT, kernel=kernel)
39 | # Plot the images
40 | images = [img, img_open, img_close, img_grad,
41 | img_tophat, img_blackhat]
42 | names = ["raw_img", "img_open", "img_close", "img_grad", "img_tophat", "img_blackhat"]
43 | cv2.imwrite(self.output+"/gradient_image1.jpg",img_grad)
44 | fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 15))
45 | for ind, p in enumerate(images):
46 | ax = axs[ind // 3, ind % 3]
47 | ax.imshow(p, cmap='gray')
48 | ax.set_title(names[ind])
49 | ax.axis('off')
50 | plt.show()
51 |
52 | def op_first_to_three_type(self, flag=False):
53 | # Global thresholding
54 | def threshold_demo(image):
55 | gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)  # convert the input image to grayscale
56 | # Plain thresholding segments the single-channel input matrix pixel by pixel.
57 | ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_TRIANGLE)
58 | if flag:
59 | cv2.imwrite(self.output + "/global_binary_first1.jpg", binary)
60 | return binary
61 |
62 | # Local (adaptive) thresholding
63 | def local_threshold(image):
64 | gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)  # convert the input image to grayscale
65 | # Adaptive thresholding chooses the threshold per region from the local brightness distribution
66 | binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 10)
67 |
68 | if flag:
69 | cv2.imwrite(self.output + "/local_binary_first1.jpg", binary)
70 | return binary
71 |
72 | # User-computed threshold (mean gray level)
73 | def custom_threshold(image):
74 | gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)  # convert the input image to grayscale
75 | h, w = gray.shape[:2]
76 | m = np.reshape(gray, [1, w * h])
77 | mean = m.sum() / (w * h)
78 | ret, binary = cv2.threshold(gray, mean, 255, cv2.THRESH_BINARY)
79 | if flag:
80 | cv2.imwrite(self.output + "/custom_binary_first1.jpg", binary)
81 | return binary
82 |
83 | if flag:
84 | src = cv2.imread("./out_data/gray_cutting_image1.jpg")
85 | else:
86 | src = cv2.imread(self.filename)
87 | src = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
88 | global_scr = threshold_demo(src)
89 | local_scr = local_threshold(src)
90 | custom_src = custom_threshold(src)
91 | images = [src, global_scr, local_scr,
92 | custom_src]
93 | names = ["src", "global_scr", "local_scr", "custom_src"]
94 | fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
95 | for ind, p in enumerate(images):
96 | ax = axs[ind // 2, ind % 2]
97 | ax.imshow(p, cmap='gray')
98 | ax.set_title(names[ind])
99 | ax.axis('off')
100 | plt.show()
101 |
102 | def op_cutting_image(self):
103 | raw_img = cv2.imread(self.filename)
104 | img = cv2.imread("./out_data/gradient_image1.jpg")
105 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
106 | blurred = cv2.bilateralFilter(gray, 7, sigmaSpace=75, sigmaColor=75)
107 | ret, binary = cv2.threshold(blurred, 127, 255, cv2.THRESH_BINARY)
108 | closed = cv2.dilate(binary, None, iterations=130)
109 | closed = cv2.erode(closed, None, iterations=127)
110 |
111 | _, contours, hierarchy = cv2.findContours(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
112 | c = sorted(contours, key=cv2.contourArea, reverse=True)[0]
113 |
114 | # compute the rotated bounding box of the largest contour
115 | rect = cv2.minAreaRect(c)
116 | box = np.int0(cv2.boxPoints(rect))
117 | # draw a bounding box around the detected region and display the image
118 | draw_img = cv2.drawContours(raw_img.copy(), [box], -1, (0, 0, 255), 3)
119 |
120 | h, w, _ = img.shape
121 | Xs = [i[0] for i in box]
122 | Ys = [i[1] for i in box]
123 | x1 = min(Xs)
124 | x2 = max(Xs)
125 | y1 = min(Ys)
126 | y2 = max(Ys)
127 | height = y2 - y1
128 | width = x2 - x1
129 | crop_img = img[0:h - height, x1:x1 + width]
130 | raw_img = raw_img[0:h - height, x1:x1 + width]
131 | cv2.imwrite(self.output + "/raw_draw_image1.jpg", draw_img)
132 | cv2.imwrite(self.output + "/raw_cutting_image1.jpg", raw_img)
133 | cv2.imwrite(self.output + "/gray_cutting_image1.jpg", crop_img)
134 |
135 | def op_edge_test(self):
136 | def gray_edge_test():
137 | img = cv2.imread("./out_data/gradient_image1.jpg")
138 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
139 | blurred = cv2.GaussianBlur(gray, (9, 9), 0)
140 | ret, binary = cv2.threshold(blurred, 127, 255, cv2.THRESH_BINARY)
141 | closed = cv2.dilate(binary, None, iterations=110)
142 | closed = cv2.erode(closed, None, iterations=120)
143 |
144 | _, contours, hierarchy = cv2.findContours(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
145 |
146 | cv2.drawContours(img, contours, -1, (0, 0, 255), 3)
147 | plt.imshow(img)
148 | plt.show()
149 | cv2.imwrite(self.output + "/gray_edge_test.jpg", img)
150 |
151 | def fourier_edge_test():
152 | img = cv2.imread('./out_data/gradient_image1.jpg', 0)
153 | f = np.fft.fft2(img)
154 | fshift = np.fft.fftshift(f)
155 |
156 | rows, cols = img.shape
157 | crow, ccol = int(rows / 2), int(cols / 2)
158 | for i in range(crow - 30, crow + 30):
159 | for j in range(ccol - 30, ccol + 30):
160 | fshift[i][j] = 0.0
161 | f_ishift = np.fft.ifftshift(fshift)
162 | img_back = np.fft.ifft2(f_ishift)  # inverse FFT of the masked spectrum, i.e. high-pass filtering
163 | # take the magnitude
164 | img_back = np.abs(img_back)
165 | plt.subplot(121), plt.imshow(img, cmap='gray')  # shown in grayscale because of the image format
166 | plt.title('Input Image'), plt.xticks([]), plt.yticks([])
167 | # First apply a gamma transform to the grayscale result to bring out dark details
168 | rows, cols = img_back.shape
169 | gamma = copy.deepcopy(img_back)
170 | rows = img.shape[0]
171 | cols = img.shape[1]
172 | for i in range(rows):
173 | for j in range(cols):
174 | gamma[i][j] = 5.0 * pow(gamma[i][j], 0.34)  # 0.34 was tuned by hand; other images may need a different value
175 | # Invert the grayscale image
176 |
177 | for i in range(rows):
178 | for j in range(cols):
179 | gamma[i][j] = 255 - gamma[i][j]
180 |
181 | plt.subplot(122), plt.imshow(gamma, cmap='gray')
182 | plt.title('Result in HPF'), plt.xticks([]), plt.yticks([])
183 | cv2.imwrite(self.output + "/fourier_edge_test_image1.jpg", gamma)
184 | plt.show()
185 |
186 | def canny_edge_test():
187 | img = cv2.imread('./out_data/gradient_image1.jpg', 0)
188 | edges = cv2.Canny(img, 100, 200)
189 |
190 | plt.subplot(121), plt.imshow(img, cmap='gray')
191 | plt.title('original'), plt.xticks([]), plt.yticks([])
192 | plt.subplot(122), plt.imshow(edges, cmap='gray')
193 | plt.title('edge'), plt.xticks([]), plt.yticks([])
194 | cv2.imwrite(self.output + "/canny_edge_test_image1.jpg", edges)
195 | plt.show()
196 |
197 | gray_edge_test()
198 | fourier_edge_test()
199 | canny_edge_test()
200 |
201 | def op_trans_plot(self):
202 | im_in = cv2.imread("./out_data/custom_binary_first1.jpg", cv2.IMREAD_GRAYSCALE)
203 | th, im_th = cv2.threshold(im_in, 220, 255, cv2.THRESH_BINARY_INV)
204 |
205 | # Copy the thresholded image.
206 | im_floodfill = im_th.copy()
207 |
208 | # Mask used for flood filling.
209 | # Note the mask needs to be 2 pixels larger than the image.
210 | h, w = im_th.shape[:2]
211 | mask = np.zeros((h + 2, w + 2), np.uint8)
212 |
213 | # Floodfill from point (0, 0)
214 | cv2.floodFill(im_floodfill, mask, (0, 0), 255)
215 | cv2.imwrite(self.output + "/edge_processing1.jpg", im_floodfill)
216 |
217 | def op_counter(self):
218 | ob1 = cv2.imread("./out_data/edge_processing1.jpg", cv2.IMREAD_GRAYSCALE)
219 | # ob1 = cv2.dilate(ob1, None, iterations=2)
220 | ob1 = cv2.bilateralFilter(ob1, 7, sigmaSpace=70, sigmaColor=70)
221 | ob1 = cv2.erode(ob1, None, iterations=2) # 1 # 2
222 | ob1 = cv2.dilate(ob1, None, iterations=2)
223 | ob2 = cv2.imread("./raw_data/icon4.jpg", cv2.IMREAD_GRAYSCALE)
224 | # ob2 = cv2.bilateralFilter(ob2, 7, sigmaSpace=60, sigmaColor=60)
225 | ob2 = cv2.erode(ob2, None, iterations=1)
226 | # ob2 = cv2.dilate(ob2, None, iterations=1)
227 | # orb = cv2.xfeatures2d.SURF_create()
228 | orb = cv2.xfeatures2d.SIFT_create()
229 | keyp1, desp1 = orb.detectAndCompute(ob1, None)
230 | keyp2, desp2 = orb.detectAndCompute(ob2, None)
231 | FLANN_INDEX_KDTREE = 1
232 | index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
233 | search_params = dict(checks=50)
234 | flann = cv2.FlannBasedMatcher(index_params, search_params)
235 | matches = flann.knnMatch(desp1, desp2, k=2)
236 | matchesMask = [[0, 0] for i in range(len(matches))]
237 | for i, (m, n) in enumerate(matches):
238 | if m.distance < 0.7 * n.distance:
239 | matchesMask[i] = [1, 0]
240 | # Keep a match only if the nearest distance is below 0.7x the second-nearest (Lowe's ratio test)
241 | draw_params = dict(matchColor=(0, 255, 0), singlePointColor=(255, 0, 0), matchesMask=matchesMask, flags=0)
242 | img3 = cv2.drawMatchesKnn(ob1, keyp1, ob2, keyp2, matches, None, **draw_params)
243 | a = len(keyp1) // len(keyp2)
244 | plt.figure(figsize=(8, 8))
245 | plt.subplot(211)
246 | plt.imshow(img3)
247 | plt.subplot(212)
248 | plt.text(0.5, 0.6, "the number of sticks:" + str(a), size=30, ha="center", va="center")
249 | plt.axis('off')
250 | plt.show()
251 | cv2.imwrite(self.output+"/counter_sticks_image1.jpg", img3)
252 |
253 | if __name__ == '__main__':
254 | ob = processing_image()
255 | ob.op_gray_to_four_type()
256 | ob.op_first_to_three_type()
257 | # ob.op_cutting_image()
258 | ob.op_edge_test()
259 | ob.op_first_to_three_type(flag=True)
260 | ob.op_trans_plot()
261 | ob.op_counter()
262 |
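A note on op_trans_plot: the script saves only the flood-filled image (im_floodfill). A minimal sketch of the usual completion of the flood-fill hole-filling recipe, assuming the same thresholded image im_th produced in that method:

import cv2
import numpy as np

im_in = cv2.imread("./out_data/custom_binary_first1.jpg", cv2.IMREAD_GRAYSCALE)
th, im_th = cv2.threshold(im_in, 220, 255, cv2.THRESH_BINARY_INV)

im_floodfill = im_th.copy()
h, w = im_th.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)         # the mask must be 2 px larger than the image
cv2.floodFill(im_floodfill, mask, (0, 0), 255)    # fill the background starting from the corner

im_floodfill_inv = cv2.bitwise_not(im_floodfill)  # interior holes become white
im_out = im_th | im_floodfill_inv                 # foreground with its holes filled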
--------------------------------------------------------------------------------
/first_image/out_data/canny_edge_test_image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/out_data/canny_edge_test_image1.jpg
--------------------------------------------------------------------------------
/first_image/out_data/counter_sticks_image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/out_data/counter_sticks_image1.jpg
--------------------------------------------------------------------------------
/first_image/out_data/custom_binary_first1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/out_data/custom_binary_first1.jpg
--------------------------------------------------------------------------------
/first_image/out_data/edge_processing1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/out_data/edge_processing1.jpg
--------------------------------------------------------------------------------
/first_image/out_data/fourier_edge_test_image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/out_data/fourier_edge_test_image1.jpg
--------------------------------------------------------------------------------
/first_image/out_data/global_binary_first1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/out_data/global_binary_first1.jpg
--------------------------------------------------------------------------------
/first_image/out_data/gradient_image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/out_data/gradient_image1.jpg
--------------------------------------------------------------------------------
/first_image/out_data/gray_cutting_image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/out_data/gray_cutting_image1.jpg
--------------------------------------------------------------------------------
/first_image/out_data/gray_edge_test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/out_data/gray_edge_test.jpg
--------------------------------------------------------------------------------
/first_image/out_data/local_binary_first1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/out_data/local_binary_first1.jpg
--------------------------------------------------------------------------------
/first_image/out_data/raw_cutting_image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/out_data/raw_cutting_image1.jpg
--------------------------------------------------------------------------------
/first_image/out_data/raw_draw_image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/out_data/raw_draw_image1.jpg
--------------------------------------------------------------------------------
/first_image/raw_data/1.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/raw_data/1.JPG
--------------------------------------------------------------------------------
/first_image/raw_data/custom_binary_first1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/raw_data/custom_binary_first1.jpg
--------------------------------------------------------------------------------
/first_image/raw_data/edge_processing1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/raw_data/edge_processing1.jpg
--------------------------------------------------------------------------------
/first_image/raw_data/gradient_image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/raw_data/gradient_image1.jpg
--------------------------------------------------------------------------------
/first_image/raw_data/gray_cutting_image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/raw_data/gray_cutting_image1.jpg
--------------------------------------------------------------------------------
/first_image/raw_data/icon4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/first_image/raw_data/icon4.jpg
--------------------------------------------------------------------------------
/imgs/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/1.png
--------------------------------------------------------------------------------
/imgs/10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/10.png
--------------------------------------------------------------------------------
/imgs/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/11.png
--------------------------------------------------------------------------------
/imgs/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/12.png
--------------------------------------------------------------------------------
/imgs/13.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/13.jpg
--------------------------------------------------------------------------------
/imgs/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/13.png
--------------------------------------------------------------------------------
/imgs/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/14.png
--------------------------------------------------------------------------------
/imgs/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/15.png
--------------------------------------------------------------------------------
/imgs/16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/16.png
--------------------------------------------------------------------------------
/imgs/17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/17.png
--------------------------------------------------------------------------------
/imgs/18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/18.png
--------------------------------------------------------------------------------
/imgs/2.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/2.JPG
--------------------------------------------------------------------------------
/imgs/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/3.png
--------------------------------------------------------------------------------
/imgs/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/4.png
--------------------------------------------------------------------------------
/imgs/5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/5.png
--------------------------------------------------------------------------------
/imgs/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/6.png
--------------------------------------------------------------------------------
/imgs/7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/7.jpg
--------------------------------------------------------------------------------
/imgs/8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/8.png
--------------------------------------------------------------------------------
/imgs/9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/imgs/9.png
--------------------------------------------------------------------------------
/other_image/counter_sticks.py:
--------------------------------------------------------------------------------
1 | # _*_ coding: utf-8 _*_
2 | __author__ = 'LelandYan'
3 | __date__ = '2019/5/19 10:08'
4 |
5 | import cv2
6 | import numpy as np
7 | import matplotlib.pyplot as plt
8 | from scipy import ndimage as ndi
9 | import skimage as sm
10 | from skimage import morphology
11 | from skimage.feature import peak_local_max
12 | from skimage.io import imshow
13 | from skimage.color import rgb2gray
14 | from skimage.filters.rank import median
15 | from skimage.measure import find_contours
16 |
17 |
18 | image = cv2.imread("./raw_data/processing_.png")
19 | image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
20 | ret, binary = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_TRIANGLE)  # TRIANGLE method: a global automatic threshold; the 0 passed here is ignored; works best for a single histogram peak
21 | print("Threshold: %s" % ret)
22 | rows,cols = image.shape
23 | labels = np.zeros([rows,cols])
24 | for i in range(rows):
25 | for j in range(cols):
26 | if(image[i,j] > ret):
27 | labels[i,j] = 1
28 | else:
29 | labels[i,j] = 0
30 | thresh = median(labels, sm.morphology.disk(5))
31 | cv2.namedWindow("hull", cv2.WINDOW_NORMAL)
32 | cv2.imshow('hull', thresh)
33 | cv2.waitKey(0)
34 | cv2.destroyAllWindows()
35 |
36 |
37 |
38 | # img = cv2.pyrDown(cv2.imread("./raw_data/4.jpg"))
39 | # # threshold() binarizes the image; img.copy() is used so the original image is not modified; cv2.THRESH_BINARY selects binary thresholding
40 | # ret, thresh = cv2.threshold(cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY), 127, 255, cv2.THRESH_BINARY)
41 | # # ret, thresh = cv2.threshold(cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY), 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
42 | # thresh = median(thresh, sm.morphology.disk(5))
43 | #
44 | #
45 | # def watershed(img):
46 | # gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
47 | # ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
48 | # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
49 | # mb = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel, iterations=2)
50 | # sure_bg = cv2.dilate(mb, kernel, iterations=3)
51 | # dist = cv2.distanceTransform(mb, cv2.DIST_L2, 3)
52 | # dist_output = cv2.normalize(dist, 0, 1.0, cv2.NORM_MINMAX)
53 | # ret, surface = cv2.threshold(dist, dist.max() * 0.6, 255, cv2.THRESH_BINARY)
54 | # surface_fg = np.uint8(surface)
55 | # unknown = cv2.subtract(sure_bg, surface_fg)
56 | # ref, markers = cv2.connectedComponents(sure_bg)
57 | # markers = markers + 1
58 | # markers[unknown == 255] = 0
59 | # markers = cv2.watershed(src, markers=markers)
60 | # src[markers == -1] = [0, 0, 255]
61 | # cv2.imshow("result", src)
62 | #
63 | #
64 | # src = cv2.imread("./raw_data/4.jpg")
65 | # cv2.namedWindow("hull", cv2.WINDOW_NORMAL)
66 | # cv2.imshow('def', src)
67 | # watershed(src)
68 | # cv2.waitKey(0)
69 | # cv2.destroyAllWindows()
70 | #
71 | #
72 | #
73 | #
74 | #
75 | #
76 | #
77 | #
78 | #
79 | #
80 | #
81 | #
82 | # # findContours finds the contours of the shapes in an image
83 | # # The first argument, thresh, is the image
84 | # # Retrieval mode: cv2.RETR_EXTERNAL returns only the outermost contours, cv2.RETR_TREE returns the full hierarchy
85 | # # The third argument is the contour-approximation method
86 | # # Return values: image is the (modified) input image, contours are the contours, hier is the hierarchy
87 | # image, contours, hier = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
88 | #
89 | # for c in contours:
90 | # # Contour drawing, method 1
91 | # # boundingRect computes the bounding box: x, y is the top-left corner, w and h are the width and height
92 | # x, y, w, h = cv2.boundingRect(c)
93 | # # draw the rectangle on img: (x, y) and (x + w, y + h) are its corners, (0, 255, 0) the colour, 2 the line thickness
94 | # cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
95 | #
96 | # # Contour drawing, method 2
97 | # # find the minimum-area (rotated) rectangle
98 | # rect = cv2.minAreaRect(c)
99 | # # compute the corner coordinates of the minimum-area rectangle
100 | # box = cv2.boxPoints(rect)
101 | # # round the coordinates to integers
102 | # box = np.int0(box)
103 | # # draw the rectangle
104 | # cv2.drawContours(img, [box], 0, (0, 0, 255), 3)
105 | #
106 | # # Contour drawing, method 3
107 | # # compute the centre and radius of the minimum enclosing circle
108 | # (x, y), radius = cv2.minEnclosingCircle(c)
109 | # # round to integers
110 | # center = (int(x), int(y))
111 | # radius = int(radius)
112 | # # draw the circular region
113 | # img = cv2.circle(img, center, radius, (0, 255, 0), 2)
114 | #
115 | # # # Contour drawing, method 4
116 | # # outline the shapes with blue lines
117 | # cv2.drawContours(img, contours, -1, (255, 0, 0), 2)
118 | # # display the image
119 | # cv2.imshow("contours", img)
120 | # cv2.waitKey()
121 | # cv2.destroyAllWindows()
122 |
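As an aside, the pixel-by-pixel labelling loop above can be written as a single vectorized comparison. A minimal sketch, assuming the same grayscale `image` and triangle threshold `ret` as in the script:

import numpy as np

labels = (image > ret).astype(np.uint8)         # 1 where the pixel is above the threshold, else 0
thresh = median(labels, sm.morphology.disk(5))  # same median filtering as before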
--------------------------------------------------------------------------------
/other_image/edge_cutting.py:
--------------------------------------------------------------------------------
1 | # _*_ coding: utf-8 _*_
2 | __author__ = 'LelandYan'
3 | __date__ = '2019/5/19 10:58'
4 | import cv2
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 | from scipy import ndimage as ndi
8 | import skimage as sm
9 | from skimage import morphology
10 | from skimage.feature import peak_local_max
11 | from skimage.filters.rank import median
12 |
13 |
14 | image = cv2.imread("./raw_data/1.jpg")
15 | gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
16 | ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
17 | # thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 10)
18 | thresh = median(thresh, sm.morphology.disk(5))
19 |
20 | # noise removal
21 | kernel = np.ones((3,3),np.uint8)
22 | opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 1)
23 | ######################################################################################
24 | th, im_th = cv2.threshold(opening, 220, 255, cv2.THRESH_BINARY_INV)
25 |
26 | # Copy the thresholded image.
27 | im_floodfill = im_th.copy()
28 |
29 | # Mask used for flood filling.
30 | # Note the mask needs to be 2 pixels larger than the image.
31 | h, w = im_th.shape[:2]
32 | mask = np.zeros((h + 2, w + 2), np.uint8)
33 |
34 | # Floodfill from point (0, 0)
35 | cv2.floodFill(im_floodfill, mask, (0, 0), 255)
36 |
37 | opening = im_floodfill
38 | ###########################################################################
39 | # sure background area
40 | sure_bg = cv2.dilate(opening,kernel,iterations=6)
41 |
42 |
43 | # Finding sure foreground area
44 | dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
45 | ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
46 | # Finding unknown region
47 | sure_fg = np.uint8(sure_fg)
48 | unknown = cv2.subtract(sure_bg,sure_fg)
49 |
50 | plt.imshow(sure_fg,'gray')
51 |
52 |
53 | cv2.namedWindow("binary2", cv2.WINDOW_NORMAL)
54 | cv2.imshow('binary2', opening)
55 | cv2.waitKey(0)
56 | cv2.destroyAllWindows()
57 |
58 |
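The script stops after computing sure_bg, sure_fg and unknown. A sketch of how the marker-based watershed could be completed from these arrays, following the same steps as the commented-out watershed() in counter_sticks.py (the names below are the variables already defined above):

ret, markers = cv2.connectedComponents(sure_fg)  # label each sure-foreground blob
markers = markers + 1                            # shift labels so the sure background is 1, not 0
markers[unknown == 255] = 0                      # the uncertain band gets label 0
markers = cv2.watershed(image, markers)          # image is the original BGR frame read at the top
image[markers == -1] = [0, 0, 255]               # mark watershed boundaries in red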
--------------------------------------------------------------------------------
/other_image/first_image.py:
--------------------------------------------------------------------------------
1 | # _*_ coding: utf-8 _*_
2 | __author__ = 'LelandYan'
3 | __date__ = '2019/5/16 19:32'
4 | import numpy as np
5 | import cv2
6 | import matplotlib.pyplot as plt
7 | import copy
8 | from pylab import mpl
9 |
10 | # Prevent garbled Chinese characters in matplotlib labels
11 | mpl.rcParams['font.sans-serif'] = ['SimHei']
12 |
13 |
14 | class processing_image:
15 | def __init__(self, filename="./raw_data/timg.jpg", output="./out_data",icon="./raw_data/icon_timg.jpg"):
16 | self.filename = filename
17 | self.output = output
18 | self.icon = icon
19 |
20 | def op_gray_to_four_type(self, kernel=(9, 9), erode_iter=5, dilate_iter=5):
21 |
22 | img = cv2.imread(self.filename)
23 | # gray
24 | img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
25 | # erode dilate
26 | closed = cv2.erode(img, None, iterations=erode_iter)
27 | img = cv2.dilate(closed, None, iterations=dilate_iter)
28 |
29 | kernel = np.ones(kernel, np.uint8)
30 | # open operation
31 | img_open = cv2.morphologyEx(img, op=cv2.MORPH_OPEN, kernel=kernel)
32 | # close operation
33 | img_close = cv2.morphologyEx(img, op=cv2.MORPH_CLOSE, kernel=kernel)
34 | # gradient operation
35 | img_grad = cv2.morphologyEx(img, op=cv2.MORPH_GRADIENT, kernel=kernel)
36 | # tophat operation
37 | img_tophat = cv2.morphologyEx(img, op=cv2.MORPH_TOPHAT, kernel=kernel)
38 | # blackhat operation
39 | img_blackhat = cv2.morphologyEx(img, op=cv2.MORPH_BLACKHAT, kernel=kernel)
40 | # Plot the images
41 | images = [img, img_open, img_close, img_grad,
42 | img_tophat, img_blackhat]
43 | names = ["raw_img", "img_open", "img_close", "img_grad", "img_tophat", "img_blackhat"]
44 | cv2.imwrite(self.output+"/gradient_image1.jpg",img_grad)
45 | fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 15))
46 | for ind, p in enumerate(images):
47 | ax = axs[ind // 3, ind % 3]
48 | ax.imshow(p, cmap='gray')
49 | ax.set_title(names[ind])
50 | ax.axis('off')
51 | plt.show()
52 |
53 | def op_first_to_three_type(self, flag=False):
54 | # Global thresholding
55 | def threshold_demo(image):
56 | gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)  # convert the input image to grayscale
57 | # Plain thresholding segments the single-channel input matrix pixel by pixel.
58 | ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_TRIANGLE)
59 | if flag:
60 | cv2.imwrite(self.output + "/global_binary_first1.jpg", binary)
61 | return binary
62 |
63 | # Local (adaptive) thresholding
64 | def local_threshold(image):
65 | gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)  # convert the input image to grayscale
66 | # Adaptive thresholding chooses the threshold per region from the local brightness distribution
67 | binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 10)
68 | if flag:
69 | cv2.imwrite(self.output + "/local_binary_first1.jpg", binary)
70 | return binary
71 |
72 | # User-computed threshold (mean gray level)
73 | def custom_threshold(image):
74 | gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)  # convert the input image to grayscale
75 | h, w = gray.shape[:2]
76 | m = np.reshape(gray, [1, w * h])
77 | mean = m.sum() / (w * h)
78 | ret, binary = cv2.threshold(gray, mean, 255, cv2.THRESH_BINARY)
79 | if flag:
80 | cv2.imwrite(self.output + "/custom_binary_first1.jpg", binary)
81 | return binary
82 |
83 | if flag:
84 | src = cv2.imread("./out_data/gradient_image1.jpg")
85 | else:
86 | src = cv2.imread(self.filename)
87 | src = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
88 | global_scr = threshold_demo(src)
89 | local_scr = local_threshold(src)
90 | custom_src = custom_threshold(src)
91 | images = [src, global_scr, local_scr,
92 | custom_src]
93 | names = ["src", "global_scr", "local_scr", "custom_src"]
94 | fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
95 | for ind, p in enumerate(images):
96 | ax = axs[ind // 2, ind % 2]
97 | ax.imshow(p, cmap='gray')
98 | ax.set_title(names[ind])
99 | ax.axis('off')
100 | plt.show()
101 |
102 | def op_cutting_image(self):
103 | raw_img = cv2.imread(self.filename)
104 | img = cv2.imread("./out_data/gradient_image1.jpg")
105 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
106 | blurred = cv2.bilateralFilter(gray, 7, sigmaSpace=75, sigmaColor=75)
107 | ret, binary = cv2.threshold(blurred, 127, 255, cv2.THRESH_BINARY)
108 | closed = cv2.dilate(binary, None, iterations=130)
109 | closed = cv2.erode(closed, None, iterations=127)
110 |
111 | _, contours, hierarchy = cv2.findContours(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
112 | c = sorted(contours, key=cv2.contourArea, reverse=True)[0]
113 |
114 | # compute the rotated bounding box of the largest contour
115 | rect = cv2.minAreaRect(c)
116 | box = np.int0(cv2.boxPoints(rect))
117 | # draw a bounding box around the detected region and display the image
118 | draw_img = cv2.drawContours(raw_img.copy(), [box], -1, (0, 0, 255), 3)
119 |
120 | h, w, _ = img.shape
121 | Xs = [i[0] for i in box]
122 | Ys = [i[1] for i in box]
123 | x1 = min(Xs)
124 | x2 = max(Xs)
125 | y1 = min(Ys)
126 | y2 = max(Ys)
127 | height = y2 - y1
128 | width = x2 - x1
129 | crop_img = img[0:h - height, x1:x1 + width]
130 | raw_img = raw_img[0:h - height, x1:x1 + width]
131 | cv2.imwrite(self.output + "/raw_draw_image1.jpg", draw_img)
132 | cv2.imwrite(self.output + "/raw_cutting_image1.jpg", raw_img)
133 | cv2.imwrite(self.output + "/gray_cutting_image1.jpg", crop_img)
134 |
135 | def op_edge_test(self):
136 | def gray_edge_test():
137 | img = cv2.imread("./out_data/gradient_image1.jpg")
138 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
139 | blurred = cv2.GaussianBlur(gray, (9, 9), 0)
140 | ret, binary = cv2.threshold(blurred, 127, 255, cv2.THRESH_BINARY)
141 | closed = cv2.dilate(binary, None, iterations=110)
142 | closed = cv2.erode(closed, None, iterations=120)
143 |
144 | _, contours, hierarchy = cv2.findContours(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
145 |
146 | cv2.drawContours(img, contours, -1, (0, 0, 255), 3)
147 | plt.imshow(img)
148 | plt.show()
149 | cv2.imwrite(self.output + "/gray_edge_test.jpg", img)
150 |
151 | def fourier_edge_test():
152 | img = cv2.imread('./out_data/gradient_image1.jpg', 0)
153 | f = np.fft.fft2(img)
154 | fshift = np.fft.fftshift(f)
155 |
156 | rows, cols = img.shape
157 | crow, ccol = int(rows / 2), int(cols / 2)
158 | for i in range(crow - 30, crow + 30):
159 | for j in range(ccol - 30, ccol + 30):
160 | fshift[i][j] = 0.0
161 | f_ishift = np.fft.ifftshift(fshift)
162 | img_back = np.fft.ifft2(f_ishift)  # inverse FFT of the masked spectrum, i.e. high-pass filtering
163 | # take the magnitude
164 | img_back = np.abs(img_back)
165 | plt.subplot(121), plt.imshow(img, cmap='gray')  # shown in grayscale because of the image format
166 | plt.title('Input Image'), plt.xticks([]), plt.yticks([])
167 | # First apply a gamma transform to the grayscale result to bring out dark details
168 | rows, cols = img_back.shape
169 | gamma = copy.deepcopy(img_back)
170 | rows = img.shape[0]
171 | cols = img.shape[1]
172 | for i in range(rows):
173 | for j in range(cols):
174 | gamma[i][j] = 5.0 * pow(gamma[i][j], 0.34)  # 0.34 was tuned by hand; other images may need a different value
175 | # Invert the grayscale image
176 |
177 | for i in range(rows):
178 | for j in range(cols):
179 | gamma[i][j] = 255 - gamma[i][j]
180 |
181 | plt.subplot(122), plt.imshow(gamma, cmap='gray')
182 | plt.title('Result in HPF'), plt.xticks([]), plt.yticks([])
183 | cv2.imwrite(self.output + "/fourier_edge_test_image1.jpg", gamma)
184 | plt.show()
185 |
186 | def canny_edge_test():
187 | img = cv2.imread('./out_data/gradient_image1.jpg', 0)
188 | edges = cv2.Canny(img, 100, 200)
189 |
190 | plt.subplot(121), plt.imshow(img, cmap='gray')
191 | plt.title('original'), plt.xticks([]), plt.yticks([])
192 | plt.subplot(122), plt.imshow(edges, cmap='gray')
193 | plt.title('edge'), plt.xticks([]), plt.yticks([])
194 | cv2.imwrite(self.output + "/canny_edge_test_image1.jpg", edges)
195 | plt.show()
196 |
197 | gray_edge_test()
198 | fourier_edge_test()
199 | canny_edge_test()
200 |
201 | def op_trans_plot(self):
202 | im_in = cv2.imread("./out_data/global_binary_first1.jpg", cv2.IMREAD_GRAYSCALE)
203 | th, im_th = cv2.threshold(im_in, 220, 255, cv2.THRESH_BINARY_INV)
204 |
205 | # Copy the thresholded image.
206 | im_floodfill = im_th.copy()
207 |
208 | # Mask used for flood filling.
209 | # Note the mask needs to be 2 pixels larger than the image.
210 | h, w = im_th.shape[:2]
211 | mask = np.zeros((h + 2, w + 2), np.uint8)
212 |
213 | # Floodfill from point (0, 0)
214 | cv2.floodFill(im_floodfill, mask, (0, 0), 255)
215 | cv2.imwrite(self.output + "/edge_processing1.jpg", im_floodfill)
216 |
217 | def op_counter(self):
218 | ob1 = cv2.imread("./out_data/edge_processing1.jpg", cv2.IMREAD_GRAYSCALE)
219 | # ob1 = cv2.dilate(ob1, None, iterations=2)
220 | ob1 = cv2.bilateralFilter(ob1, 7, sigmaSpace=70, sigmaColor=70)
221 | ob1 = cv2.erode(ob1, None, iterations=2) # 1 # 2
222 | ob1 = cv2.dilate(ob1, None, iterations=2)
223 | ob2 = cv2.imread(self.icon, cv2.IMREAD_GRAYSCALE)
224 | # ob2 = cv2.bilateralFilter(ob2, 7, sigmaSpace=60, sigmaColor=60)
225 | ob2 = cv2.erode(ob2, None, iterations=1)
226 | # ob2 = cv2.dilate(ob2, None, iterations=1)
227 | # orb = cv2.xfeatures2d.SURF_create()
228 | orb = cv2.xfeatures2d.SIFT_create()
229 | keyp1, desp1 = orb.detectAndCompute(ob1, None)
230 | keyp2, desp2 = orb.detectAndCompute(ob2, None)
231 | FLANN_INDEX_KDTREE = 1
232 | index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
233 | search_params = dict(checks=50)
234 | flann = cv2.FlannBasedMatcher(index_params, search_params)
235 | matches = flann.knnMatch(desp1, desp2, k=2)
236 | matchesMask = [[0, 0] for i in range(len(matches))]
237 | for i, (m, n) in enumerate(matches):
238 | if m.distance < 0.7 * n.distance:
239 | matchesMask[i] = [1, 0]
240 | # Keep a match only if the nearest distance is below 0.7x the second-nearest (Lowe's ratio test)
241 | draw_params = dict(matchColor=(0, 255, 0), singlePointColor=(255, 0, 0), matchesMask=matchesMask, flags=0)
242 | img3 = cv2.drawMatchesKnn(ob1, keyp1, ob2, keyp2, matches, None, **draw_params)
243 | a = len(keyp1) // len(keyp2)
244 | plt.figure(figsize=(8, 8))
245 | plt.subplot(211)
246 | plt.imshow(img3)
247 | plt.subplot(212)
248 | plt.text(0.5, 0.6, "the number of sticks:" + str(a), size=30, ha="center", va="center")
249 | plt.axis('off')
250 | plt.show()
251 | cv2.imwrite(self.output+"/counter_sticks_image1.jpg", img3)
252 |
253 | if __name__ == '__main__':
254 | ob = processing_image(filename="./raw_data/timber4.jpg",icon="./raw_data/icon_timgber4.png")
255 | ob.op_gray_to_four_type()
256 | ob.op_first_to_three_type()
257 | # ob.op_cutting_image()
258 | ob.op_edge_test()
259 | ob.op_first_to_three_type(flag=True)
260 | ob.op_trans_plot()
261 | ob.op_counter()
262 |
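For reference, op_counter's counting heuristic can be factored into a standalone helper. The sketch below is an illustration only: count_by_keypoints is a hypothetical name, it assumes opencv-contrib (cv2.xfeatures2d) as the script does, and the underlying assumption (as in op_counter) is that the ratio of SIFT keypoints in the scene to keypoints on a single-stick template approximates the stick count.

import cv2

def count_by_keypoints(scene_gray, template_gray):
    # Detect SIFT keypoints on the preprocessed scene and on the template
    sift = cv2.xfeatures2d.SIFT_create()
    kp_scene, des_scene = sift.detectAndCompute(scene_gray, None)
    kp_tmpl, des_tmpl = sift.detectAndCompute(template_gray, None)

    # FLANN kd-tree matching with Lowe's ratio test, as in op_counter
    flann = cv2.FlannBasedMatcher(dict(algorithm=1, trees=5), dict(checks=50))
    matches = flann.knnMatch(des_scene, des_tmpl, k=2)
    good = [m for m, n in matches if m.distance < 0.7 * n.distance]

    # Keypoint-count ratio as a rough estimate of how many sticks are present
    return len(kp_scene) // len(kp_tmpl), len(good)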
--------------------------------------------------------------------------------
/other_image/last_test.py:
--------------------------------------------------------------------------------
1 | # _*_ coding: utf-8 _*_
2 | __author__ = 'LelandYan'
3 | __date__ = '2019/5/19 16:22'
4 |
5 | import cv2
6 | import matplotlib.pyplot as plt
7 | import numpy as np
8 |
9 | img = cv2.imread("aaaaaaaa.jpg")
10 | # img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
11 | img_hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
12 | # cv2.namedWindow("hull", cv2.WINDOW_NORMAL)
13 | # cv2.imshow("hull", img)
14 | # cv2.waitKey()
15 | # cv2.destroyAllWindows()
16 | from scipy import ndimage as ndi
17 |
18 | img = img[:, :, 0]
19 | plt.figure('hist_plot')
20 | arr = img.flatten()
21 | plt.hist(arr, 256)
22 | plt.show()
23 |
24 | rows, cols = img.shape
25 | labels = np.zeros([rows, cols])
26 | for i in range(rows):
27 | for j in range(cols):
28 | if (img[i, j] > 70):
29 | labels[i, j] = 1
30 | else:
31 | labels[i, j] = 0
32 |
33 | cv2.namedWindow("labels", cv2.WINDOW_NORMAL)
34 | cv2.imshow("labels", labels)
35 | cv2.waitKey(0)
36 | cv2.destroyAllWindows()
37 |
--------------------------------------------------------------------------------
/other_image/main.py:
--------------------------------------------------------------------------------
1 | # _*_ coding: utf-8 _*_
2 | __author__ = 'LelandYan'
3 | __date__ = '2019/5/19 7:42'
4 |
5 | import cv2
6 | import numpy as np
7 | import matplotlib.pyplot as plt
8 | from scipy import ndimage as ndi
9 | import skimage as sm
10 | from skimage import morphology
11 | from skimage.feature import peak_local_max
12 | from skimage.io import imshow
13 | from skimage.color import rgb2gray
14 | from skimage.filters.rank import median
15 | from skimage.measure import find_contours
16 |
17 |
18 | # image = cv2.imread("./raw_data/1.jpg")
19 | # dst = cv2.fastNlMeansDenoisingColored(image,None,10,10,7,21)
20 | # img = cv2.pyrDown(dst, cv2.IMREAD_UNCHANGED)
21 | # # ret, thresh = cv2.threshold(cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY) , 127, 255, cv2.THRESH_BINARY)
22 | # thresh = cv2.adaptiveThreshold(cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 10)
23 | # thresh = median(thresh, sm.morphology.disk(5))
24 | # cv2.namedWindow("thresh", cv2.WINDOW_NORMAL)
25 | # cv2.imshow("thresh", thresh)
26 | # cv2.waitKey()
27 | # cv2.destroyAllWindows()
28 | ###################################################################
29 |
30 | ##################################################################
31 | # kernel = np.ones((3,3),np.uint8)
32 | # opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 1)
33 |
34 | # threshold, imgOtsu = cv2.threshold(thresh, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
35 | # cv2.namedWindow("hull", cv2.WINDOW_NORMAL)
36 | # cv2.imshow("hull", imgOtsu)
37 | # cv2.waitKey()
38 | # cv2.destroyAllWindows()
39 | # cv2.namedWindow("hull", cv2.WINDOW_NORMAL)
40 | # cv2.imshow("hull", thresh)
41 | # cv2.waitKey()
42 | # cv2.destroyAllWindows()
43 | # findContours finds the contours of the shapes in the image
44 | # The first argument, thresh, is the image
45 | # Retrieval mode: cv2.RETR_EXTERNAL returns only the outermost contours, cv2.RETR_TREE returns the full hierarchy
46 | # The third argument is the contour-approximation method
47 | # Return values: image is the (modified) input image, contours are the contours, hier is the hierarchy
48 | #########################################################################################
49 | image = cv2.imread("./raw_data/4.jpg")
50 | gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
51 |
52 | ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
53 | # thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 10)
54 |
55 |
56 | # noise removal
57 | kernel = np.ones((3,3),np.uint8)
58 | opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 1)
59 | opening = cv2.bilateralFilter(opening,9,80,80)
60 | opening = median(opening, sm.morphology.disk(3))
61 | # opening = cv2.morphologyEx(opening,cv2.MORPH_GRADIENT,kernel, iterations = 1)
62 | ######################################################################################
63 | th, im_th = cv2.threshold(opening, 220, 255, cv2.THRESH_BINARY_INV)
64 | # sure_bg = cv2.dilate(opening,kernel,iterations=2)
65 | # Copy the thresholded image.
66 | im_floodfill = im_th.copy()
67 |
68 | # Mask used for flood filling.
69 | # Note the mask needs to be 2 pixels larger than the image.
70 | h, w = im_th.shape[:2]
71 | mask = np.zeros((h + 2, w + 2), np.uint8)
72 |
73 | # Floodfill from point (0, 0)
74 | cv2.floodFill(im_floodfill, mask, (0, 0), 255)
75 |
76 | opening = im_floodfill
77 | opening = cv2.erode(opening,kernel,iterations=7)
78 | #########################################################################################
79 | image, contours, hier = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
80 | # Create a new blank image, black, the same size as the binary image
81 | black = cv2.cvtColor(np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8), cv2.COLOR_GRAY2BGR)
82 |
83 | counter = 0
84 | for p,cnt in enumerate(contours):
85 |
86 | area = cv2.contourArea(contours[p])
87 | if area < 30:
88 | print("$$$$")
89 | continue
90 | # The contour perimeter is also called the arc length and is computed with cv2.arcLength(); the second argument says whether the shape is closed (True) or an open curve
91 | epsilon = 0.01 * cv2.arcLength(cnt, True)
92 | # approxPolyDP approximates the point set: cnt is the contour and epsilon is the precision, i.e. the maximum distance allowed between the original curve and its approximation (smaller means more accurate).
93 | # If the third argument is True the approximated curve is closed (its ends are joined); if False it is left open.
94 | approx = cv2.approxPolyDP(cnt, epsilon, True)
95 | # convexHull checks the contour for convexity defects and corrects them; cnt is the contour.
96 | hull = cv2.convexHull(cnt)
97 | # Draw the original contour
98 | cv2.drawContours(black, [cnt], -1, (0, 255, 0), 2)
99 | # Draw the polygonal approximation of the contour
100 | cv2.drawContours(black, [approx], -1, (255, 255, 0), 2)
101 | # Draw the convexity-corrected (convex hull) contour
102 | cv2.drawContours(black, [hull], -1, (0, 0, 255), 2)
103 | counter += 1
104 | # Display the image
105 | print(counter)
106 | plt.imshow(black)
107 |
108 | cv2.namedWindow("hull", cv2.WINDOW_NORMAL)
109 | cv2.imshow("hull", black)
110 | cv2.waitKey()
111 | cv2.destroyAllWindows()
112 | from scipy import ndimage as ndi
113 | # labels = dst
114 | distance = ndi.distance_transform_edt(opening)  # distance transform
115 | # min_distance: peaks must be at least min_distance pixels apart (a 2*min_distance + 1 window); use min_distance = 1 to find the maximum number of peaks.
116 | # exclude_border: 0 means peaks on the image border are not excluded
117 | # indices: if False, a boolean array the same size as the image is returned; if True, the peak coordinates are returned
118 | local_maxi = peak_local_max(distance, exclude_border=0, min_distance=12, indices=False,
119 | footprint=np.ones((10, 10)), labels=opening)  # find the peaks
120 | markers = ndi.label(local_maxi)[0]  # initial marker points
121 | label_ = morphology.watershed(-distance, markers, mask=opening)  # watershed based on the distance transform
122 |
123 | fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12))
124 | axes = axes.ravel()
125 | ax0, ax1, ax2, ax3 = axes
126 |
127 | ax0.imshow(opening, cmap=plt.cm.gray)#, interpolation='nearest')
128 | ax0.set_title("Original")
129 | ax1.imshow(-distance, cmap=plt.cm.jet, interpolation='nearest')
130 | ax1.set_title("Distance")
131 | ax2.imshow(sm.morphology.dilation(markers,sm.morphology.square(10)), cmap=plt.cm.Spectral, interpolation='nearest')
132 | ax2.set_title("Markers")
133 | ax3.imshow(label_, cmap=plt.cm.Spectral, interpolation='nearest')
134 | ax3.set_title("Segmented")
135 | for ax in axes:
136 | ax.axis('off')
137 |
138 | fig.tight_layout()
139 | plt.show()
140 |
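Since the goal is a count, the number of segmented objects can be read straight off the watershed output. A small sketch, assuming the label_ array produced above (label 0 is the background outside the mask):

import numpy as np

num_objects = len(np.unique(label_)) - 1   # number of distinct labels minus the background
print("objects found:", num_objects)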
--------------------------------------------------------------------------------
/other_image/other_image.py:
--------------------------------------------------------------------------------
1 | # _*_ coding: utf-8 _*_
2 | __author__ = 'LelandYan'
3 | __date__ = '2019/5/17 18:55'
4 |
5 | import cv2
6 | import numpy as np
7 | import matplotlib.pyplot as plt
8 | from scipy import ndimage as ndi
9 | import skimage as sm
10 | from skimage import morphology
11 | from skimage.feature import peak_local_max
12 | from skimage.filters.rank import median
13 | image = cv2.imread("./raw_data/4.jpg")
14 | kernel_sharpen_1 = np.array([
15 | [-1,-1,-1],
16 | [-1,9,-1],
17 | [-1,-1,-1]])
18 | kernel_sharpen_2 = np.array([
19 | [1,1,1],
20 | [1,-7,1],
21 | [1,1,1]])
22 | kernel_sharpen_3 = np.array([
23 | [-1,-1,-1,-1,-1],
24 | [-1,2,2,2,-1],
25 | [-1,2,8,2,-1],
26 | [-1,2,2,2,-1],
27 | [-1,-1,-1,-1,-1]])/8.0
28 |
29 | output_1 = cv2.filter2D(image,-1,kernel_sharpen_3)
30 | # output_2 = cv2.filter2D(image,-1,kernel_sharpen_2)
31 | # output_3 = cv2.filter2D(image,-1,kernel_sharpen_3)
32 | # Show the sharpening results
33 | # cv2.namedWindow('Original Image', cv2.WINDOW_NORMAL)
34 | # cv2.imwrite('Original_Image1.jpg',image)
35 | # cv2.namedWindow('sharpen_1 Image', cv2.WINDOW_NORMAL)
36 | # cv2.imwrite('./out_data/sharpen_1_Image1.jpg',output_1)
37 | # cv2.namedWindow('sharpen_2 Image', cv2.WINDOW_NORMAL)
38 | # cv2.imwrite('./out_data/sharpen_2_Image1.jpg',output_2)
39 | # cv2.namedWindow('sharpen_3 Image', cv2.WINDOW_NORMAL)
40 | # cv2.imwrite('./out_data/sharpen_3_Image1.jpg',output_3)
41 |
42 |
43 | output_1 = cv2.cvtColor(output_1, cv2.COLOR_RGB2GRAY)  # convert the input image to grayscale
44 | # output_2 = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)  # convert the input image to grayscale
45 | # output_3 = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)  # convert the input image to grayscale
46 |
47 | # cv2.namedWindow('im_floodfill', 0)
48 | # cv2.imshow("im_floodfill", output_1)
49 | #
50 | # cv2.waitKey(0)
51 | # cv2.destroyAllWindows()
52 |
53 |
54 |
55 | # cv2.namedWindow('sharpen_1 Image', cv2.WINDOW_NORMAL)
56 | # cv2.imwrite('./out_data/gray_sharpen_1_Image1.jpg',output_1)
57 | # # cv2.namedWindow('sharpen_2 Image', cv2.WINDOW_NORMAL)
58 | # cv2.imwrite('./out_data/gray_sharpen_2_Image1.jpg',output_2)
59 | # # cv2.namedWindow('sharpen_3 Image', cv2.WINDOW_NORMAL)
60 | # cv2.imwrite('./out_data/gray_sharpen_3_Image1.jpg',output_3)
61 |
62 | #
63 | plt.hist(output_1.ravel(),256)
64 | plt.show()
65 | ret, binary = cv2.threshold(output_1, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_TRIANGLE)  # TRIANGLE method: a global automatic threshold; the 0 passed here is ignored; works best for a single histogram peak
66 | print("Threshold: %s" % ret)
67 | # cv2.namedWindow("binary0", cv2.WINDOW_NORMAL)
68 | # #cv.imwrite("binary_first11.jpg", binary)
69 | # cv2.imshow("binary0", binary)
70 | # cv2.waitKey(0)
71 | # cv2.destroyAllWindows()
72 | rows,cols = output_1.shape
73 | labels = np.zeros([rows,cols])
74 | for i in range(rows):
75 | for j in range(cols):
76 | if(output_1[i,j] > ret):
77 | labels[i,j] = 1
78 | else:
79 | labels[i,j] = 0
80 |
81 | # cv2.namedWindow("labels", cv2.WINDOW_NORMAL)
82 | # cv2.imwrite("aaaa.jpg", labels)
83 | # cv2.imshow("labels", labels)
84 | # cv2.waitKey(0)
85 | # cv2.destroyAllWindows()
86 | #
87 | # #
88 | labels = median(labels, sm.morphology.disk(5))
89 | distance = ndi.distance_transform_edt(labels)  # distance transform
90 | # min_distance: peaks must be at least min_distance pixels apart (a 2*min_distance + 1 window); use min_distance = 1 to find the maximum number of peaks.
91 | # exclude_border: 0 means peaks on the image border are not excluded
92 | # indices: if False, a boolean array the same size as the image is returned; if True, the peak coordinates are returned
93 | local_maxi = peak_local_max(distance, exclude_border=0, min_distance=12, indices=False,
94 | footprint=np.ones((10, 10)), labels=labels)  # find the peaks
95 | markers = ndi.label(local_maxi)[0]  # initial marker points
96 | label_ = morphology.watershed(-distance, markers, mask=labels)  # watershed based on the distance transform
97 |
98 | fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12))
99 | axes = axes.ravel()
100 | ax0, ax1, ax2, ax3 = axes
101 |
102 | ax0.imshow(labels, cmap=plt.cm.gray)#, interpolation='nearest')
103 | ax0.set_title("Original")
104 | ax1.imshow(-distance, cmap=plt.cm.jet, interpolation='nearest')
105 | ax1.set_title("Distance")
106 | ax2.imshow(sm.morphology.dilation(markers,sm.morphology.square(10)), cmap= plt.cm.Spectral, interpolation='nearest')
107 | ax2.set_title("Markers")
108 | plt.imshow(label_, cmap= plt.cm.Spectral, interpolation='nearest')
109 | print(label_.shape)
110 | ax3.set_title("Segmented")
111 |
112 | for ax in axes:
113 | ax.axis('off')
114 |
115 | fig.tight_layout()
116 | plt.show()
117 |
118 |
119 |
120 | # import math
121 | # err = []
122 | # import math
123 | # err = []
124 | # for i in range(binary.shape[0]):
125 | # h1,w1 = binary[i][0],binary[i][1]
126 | # if i in err:
127 | # continue
128 | # for j in range(i+1,binary.shape[0]):
129 | # h2,w2 = binary[j][0],binary[j][1]
130 | # ab = math.sqrt(math.pow(abs(h2-h1), 2) + math.pow(abs(w2-w1), 2))
131 | # if ab <= 10:
132 | # # print 'error:' , x_y[i],' and ', x_y[j],'i,j = ',i,j
133 | # err.append(j)
134 | # new_x_y = []
135 | # for i in range(len(binary)):
136 | # if i not in err:
137 | # new_x_y.append(binary[i])
138 | # print('found', len(binary), 'circles in total')
139 | #
140 | #
141 | # # def threshold_demo(image):
142 | # # gray = image
143 | # # # gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) #把输入图像灰度化
144 | # # #直接阈值化是对输入的单通道矩阵逐像素进行阈值分割。
145 | # # ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_TRIANGLE)
146 | # # print("threshold value %s"%ret)
147 | # # cv2.namedWindow("binary0", cv2.WINDOW_NORMAL)
148 | # # #cv.imwrite("binary_first11.jpg", binary)
149 | # # cv2.imshow("binary0", binary)
150 | # #
151 | # # #局部阈值
152 | # # def local_threshold(image):
153 | # # gray = image
154 | # # # gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) #把输入图像灰度化
155 | # # #自适应阈值化能够根据图像不同区域亮度分布,改变阈值
156 | # # binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY, 25, 10)
157 | # # cv2.namedWindow("binary1", cv2.WINDOW_NORMAL)
158 | # # #cv.imwrite("binary_first22.jpg", binary)
159 | # # cv2.imshow("binary1", binary)
160 | # #
161 | # # #用户自己计算阈值
162 | # # def custom_threshold(image):
163 | # # gray = image
164 | # # # gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) #把输入图像灰度化
165 | # # h, w =gray.shape[:2]
166 | # # m = np.reshape(gray, [1,w*h])
167 | # # mean = m.sum()/(w*h)
168 | # # print("mean:",mean)
169 | # # ret, binary = cv2.threshold(gray, mean, 255, cv2.THRESH_BINARY)
170 | # # #cv.imwrite("binary_first33.jpg", binary)
171 | # # cv2.namedWindow("binary2", cv2.WINDOW_NORMAL)
172 | # # cv2.imshow("binary2", binary)
173 | # #
174 | # # # src = cv2.imread(output_1)
175 | # # src = output_3
176 | # # cv2.namedWindow('input_image', cv2.WINDOW_NORMAL) #设置为WINDOW_NORMAL可以任意缩放
177 | # # cv2.imshow('input_image', src)
178 | # #
179 | # # threshold_demo(src)
180 | # # local_threshold(src)
181 | # # custom_threshold(src)
182 | # # cv2.waitKey(0)
183 | # # cv2.destroyAllWindows()
184 |
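A related sketch: the area filter that main.py applies to contours (discarding regions with area < 30) can also be applied to the watershed labels here before counting. This is an assumption about a sensible cut-off, not part of the original script; regionprops comes from skimage.measure and MIN_AREA is an assumed tuning value.

from skimage.measure import regionprops

MIN_AREA = 30
regions = [r for r in regionprops(label_) if r.area >= MIN_AREA]  # keep only sizeable segments
print("segments kept:", len(regions))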
--------------------------------------------------------------------------------
/other_image/out_data/canny_edge_test_image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/canny_edge_test_image1.jpg
--------------------------------------------------------------------------------
/other_image/out_data/counter_sticks_image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/counter_sticks_image1.jpg
--------------------------------------------------------------------------------
/other_image/out_data/custom_binary_first1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/custom_binary_first1.jpg
--------------------------------------------------------------------------------
/other_image/out_data/edge_processing1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/edge_processing1.jpg
--------------------------------------------------------------------------------
/other_image/out_data/fourier_edge_test_image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/fourier_edge_test_image1.jpg
--------------------------------------------------------------------------------
/other_image/out_data/global_binary_first1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/global_binary_first1.jpg
--------------------------------------------------------------------------------
/other_image/out_data/gradient_image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/gradient_image1.jpg
--------------------------------------------------------------------------------
/other_image/out_data/gray_edge_test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/gray_edge_test.jpg
--------------------------------------------------------------------------------
/other_image/out_data/gray_sharpen_1_Image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/gray_sharpen_1_Image1.jpg
--------------------------------------------------------------------------------
/other_image/out_data/gray_sharpen_2_Image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/gray_sharpen_2_Image1.jpg
--------------------------------------------------------------------------------
/other_image/out_data/gray_sharpen_3_Image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/gray_sharpen_3_Image1.jpg
--------------------------------------------------------------------------------
/other_image/out_data/local_binary_first1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/local_binary_first1.jpg
--------------------------------------------------------------------------------
/other_image/out_data/sharpen_1_Image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/sharpen_1_Image1.jpg
--------------------------------------------------------------------------------
/other_image/out_data/sharpen_2_Image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/sharpen_2_Image1.jpg
--------------------------------------------------------------------------------
/other_image/out_data/sharpen_3_Image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/out_data/sharpen_3_Image1.jpg
--------------------------------------------------------------------------------
/other_image/raw_data/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/1.jpg
--------------------------------------------------------------------------------
/other_image/raw_data/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/2.jpg
--------------------------------------------------------------------------------
/other_image/raw_data/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/3.jpg
--------------------------------------------------------------------------------
/other_image/raw_data/4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/4.jpg
--------------------------------------------------------------------------------
/other_image/raw_data/5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/5.jpg
--------------------------------------------------------------------------------
/other_image/raw_data/6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/6.jpg
--------------------------------------------------------------------------------
/other_image/raw_data/7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/7.jpg
--------------------------------------------------------------------------------
/other_image/raw_data/icon10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/icon10.png
--------------------------------------------------------------------------------
/other_image/raw_data/icon11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/icon11.png
--------------------------------------------------------------------------------
/other_image/raw_data/icon_timg.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/icon_timg.jpg
--------------------------------------------------------------------------------
/other_image/raw_data/icon_timgber4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/icon_timgber4.png
--------------------------------------------------------------------------------
/other_image/raw_data/mucai-003 (1).jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/mucai-003 (1).jpg
--------------------------------------------------------------------------------
/other_image/raw_data/processing_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/processing_.png
--------------------------------------------------------------------------------
/other_image/raw_data/timber4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/timber4.jpg
--------------------------------------------------------------------------------
/other_image/raw_data/timg.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/other_image/raw_data/timg.jpg
--------------------------------------------------------------------------------
/other_image/test2.py:
--------------------------------------------------------------------------------
1 | # _*_ coding: utf-8 _*_
2 | __author__ = 'LelandYan'
3 | __date__ = '2019/5/19 7:23'
4 |
5 | import cv2
6 | import numpy as np
7 | import matplotlib.pyplot as plt
8 | from scipy import ndimage as ndi
9 | import skimage as sm
10 | from skimage import morphology
11 | from skimage.feature import peak_local_max
12 | from skimage.io import imshow
13 | from skimage.color import rgb2gray
14 | from skimage.filters.rank import median
15 | from skimage.measure import find_contours
16 | # image = cv2.imread("./raw_data/1.jpg")
17 | # cv2.imwrite("canny.jpg", cv2.Canny(image, 200, 300))
18 | # cv2.namedWindow("canny", cv2.WINDOW_NORMAL)
19 | # cv2.imshow("canny", cv2.imread("canny.jpg"))
20 | # cv2.waitKey()
21 | # cv2.destroyAllWindows()
22 |
23 | img = cv2.pyrDown(cv2.imread("./raw_data/1.jpg", cv2.IMREAD_UNCHANGED))
24 | # threshold binarizes a single-channel image; img.copy() keeps the original untouched, and cv2.THRESH_BINARY selects binary thresholding
25 | # ret, thresh = cv2.threshold(cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY), 127, 255, cv2.THRESH_BINARY)
26 | ret, thresh = cv2.threshold(cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY),0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
27 | thresh = median(thresh, sm.morphology.disk(5))
28 | kernel = np.ones((3, 3), np.uint8)
29 | opening = cv2.morphologyEx(thresh, op= cv2.MORPH_OPEN,kernel=kernel,iterations=1)
30 | # sure_bg = cv2.dilate(opening,kernel,iterations=3)  # dilation
31 | # cv2.namedWindow("thresh", cv2.WINDOW_NORMAL)
32 | # cv2.imshow("thresh", opening)
33 | # cv2.waitKey()
34 | # cv2.destroyAllWindows()
35 |
36 | # findContours locates the contours of shapes in the image
37 | # first argument: the binary input image, here thresh
38 | # retrieval mode: cv2.RETR_EXTERNAL returns only the outermost contours, cv2.RETR_TREE returns the full hierarchy
39 | # contour approximation method: cv2.CHAIN_APPROX_SIMPLE compresses the contour points
40 | # return values (OpenCV 3.x): image is the input image, contours is the list of contours, hier is the hierarchy
41 | image, contours, hier = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
42 |
43 | for c in contours:
44 |     # contour-drawing method 1
45 |     # boundingRect computes the bounding box: x, y is the top-left corner, w and h are the width and height
46 |     # x, y, w, h = cv2.boundingRect(c)
47 |     # # draw the rectangle on img: (x, y) and (x + w, y + h) are opposite corners, (0, 255, 0) is the color, 2 is the line thickness
48 |     # cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
49 |
50 |     # contour-drawing method 2
51 |     # find the minimum-area (rotated) rectangle
52 |     # rect = cv2.minAreaRect(c)
53 |     # # compute the corner coordinates of that rectangle
54 |     # box = cv2.boxPoints(rect)
55 |     # # round the coordinates to integers
56 |     # box = np.int0(box)
57 |     # # draw the rectangle
58 |     # cv2.drawContours(img, [box], 0, (0, 0, 255), 3)
59 |
60 |     # contour-drawing method 3
61 |     # compute the center and radius of the minimum enclosing circle
62 |     (x, y), radius = cv2.minEnclosingCircle(c)
63 |     # round to integers
64 |     center = (int(x), int(y))
65 |     radius = int(radius)
66 |     # draw the enclosing circle
67 | img = cv2.circle(img, center, radius, (0, 255, 0), 2)
68 |
69 |     # contour-drawing method 4
70 |     # outline every contour in blue
71 | cv2.drawContours(img, contours, -1, (255, 0, 0), 2)
72 | print(len(contours))
73 | # display the image
74 | cv2.imshow("contours", img)
75 | cv2.waitKey()
76 | cv2.destroyAllWindows()
77 |
78 |
--------------------------------------------------------------------------------
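Note on test2.py: the three-value unpacking of cv2.findContours matches the OpenCV 3.x API pinned in requirements.txt (opencv-python==3.4.2.16); OpenCV 4.x returns only (contours, hierarchy), so that line breaks after an upgrade. A minimal version-agnostic sketch (the helper name find_contours_compat is our own, not part of the repo):

import cv2

def find_contours_compat(binary_img):
    # OpenCV 3.x returns (image, contours, hierarchy); 4.x returns (contours, hierarchy).
    # Taking the last two elements of the result works for both versions.
    result = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return result[-2], result[-1]

Counting objects with print(len(contours)) counts every outer contour, including small noise blobs; the median filter applied to thresh in the script is what keeps that count from being inflated.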
/other_image/test4.py:
--------------------------------------------------------------------------------
1 | # _*_ coding: utf-8 _*_
2 | __author__ = 'LelandYan'
3 | __date__ = '2019/5/19 7:47'
4 |
5 | import cv2
6 | import numpy as np
7 | import matplotlib.pyplot as plt
8 | from scipy import ndimage as ndi
9 | import skimage as sm
10 | from skimage import morphology
11 | from skimage.feature import peak_local_max
12 | from skimage.io import imshow
13 | from skimage.color import rgb2gray
14 | from skimage.filters.rank import median
15 | from skimage.measure import find_contours
16 |
17 | ################################################################################
18 |
19 | print('Load Image')
20 |
21 | imgFile = './raw_data/1.jpg'
22 |
23 | # load an original image
24 | img = cv2.imread(imgFile)
25 | ################################################################################
26 |
27 | # color value range
28 | cRange = 256
29 |
30 | rows, cols, channels = img.shape
31 |
32 | # convert color space from bgr to gray
33 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
34 | imgGray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # img was converted to RGB above, so convert from RGB here
35 | ################################################################################
36 | thresh = median(imgGray, sm.morphology.disk(5))
37 | kernel = np.ones((3, 3), np.uint8)
38 | thresh = cv2.erode(thresh,kernel,iterations=3)  # erosion
39 | # laplacian edge
40 | imgLap = cv2.Laplacian(thresh, cv2.CV_8U)
41 |
42 | # otsu method
43 | threshold, imgOtsu = cv2.threshold(thresh, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
44 |
45 | # adaptive gaussian threshold
46 | imgAdapt = cv2.adaptiveThreshold(thresh, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
47 | # imgAdapt = cv2.medianBlur(imgAdapt, 3)
48 | ################################################################################
49 |
50 | canny = cv2.Canny(imgAdapt, 10, 20)
51 | # Hough transform circle detection
52 | circles = cv2.HoughCircles(canny, cv2.HOUGH_GRADIENT, 1, 50, param1=80, param2=30, minRadius=0, maxRadius=50)
53 | # print the return value to inspect its type
54 | # print(circles)
55 |
56 | # print the number of detected circles
57 | print(len(circles[0]))
58 | #
59 | # print('------------- separator -----------------')
60 | # draw each detected circle from its (x, y, r) parameters
61 | for circle in circles[0]:
62 |     # basic circle parameters
63 | print(circle[2])
64 |     # x, y coordinates (the circle center)
65 | x = int(circle[0])
66 | y = int(circle[1])
67 |     # radius
68 | r = int(circle[2])
69 |     # draw the circle on the original image; the parameters are cast to int, so the drawn circle is slightly off
70 | img = cv2.circle(img, (x, y), r, (255, 255,255), 1, 8, 0)
71 | # display the new image
72 | cv2.namedWindow("binary2", cv2.WINDOW_NORMAL)
73 | cv2.imshow('binary2', img)
74 |
75 | # press any key to exit
76 | cv2.waitKey(0)
77 | cv2.destroyAllWindows()
78 |
79 |
80 |
81 |
82 |
83 |
84 | # display original image and gray image
85 | # plt.subplot(2, 2, 1), plt.imshow(img), plt.title('Original Image'), plt.xticks([]), plt.yticks([])
86 | # plt.subplot(2, 2, 2), plt.imshow(imgLap, cmap='gray'), plt.title('Laplacian Edge'), plt.xticks([]), plt.yticks([])
87 | # plt.subplot(2, 2, 3), plt.imshow(imgOtsu, cmap='gray'), plt.title('Otsu Method'), plt.xticks([]), plt.yticks([])
88 | # plt.subplot(2, 2, 4), plt.imshow(imgAdapt, cmap='gray'), plt.title('Adaptive Gaussian Threshold'), plt.xticks(
89 | # []), plt.yticks([])
90 | # plt.show()
91 | # ################################################################################
92 | #
93 | # print('Goodbye!')
--------------------------------------------------------------------------------
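Note on test4.py: cv2.HoughCircles returns None when no circle passes the accumulator thresholds, so print(len(circles[0])) raises a TypeError in that case, and the returned (x, y, r) values are float32. A minimal guarded sketch using the same parameters as the script (the input path and variable names are assumptions, adjust as needed):

import cv2
import numpy as np

img = cv2.imread('./raw_data/1.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 10, 20)

circles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, 1, 50,
                           param1=80, param2=30, minRadius=0, maxRadius=50)
if circles is None:
    print('no circles detected')
else:
    circles = np.uint16(np.around(circles))  # round the float32 (x, y, r) triples
    print(len(circles[0]), 'circles detected')
    for x, y, r in circles[0]:
        cv2.circle(img, (int(x), int(y)), int(r), (255, 255, 255), 1)

param2 is the accumulator threshold: lowering it finds more (and more spurious) circles, which is the main knob when tuning the stick count.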
/other_image/分水岭.py:
--------------------------------------------------------------------------------
1 | # _*_ coding: utf-8 _*_
2 | __author__ = 'LelandYan'
3 | __date__ = '2019/5/19 10:25'
4 |
5 | import cv2 as cv
6 | import numpy as np
7 |
8 | def water_shed(image):
9 |     # 1. denoise, convert to gray, binarize
10 | blurred = cv.pyrMeanShiftFiltering(image,10,30)
11 | blurred=cv.bilateralFilter(image,0,50,5)
12 | # cv.imshow('blurred',blurred)
13 | gray=cv.cvtColor(blurred,cv.COLOR_BGR2GRAY)
14 | ret,binary=cv.threshold(gray,0,255,cv.THRESH_BINARY|cv.THRESH_OTSU)
15 | # cv.imshow('binary',binary)
16 |     # 2. morphology: opening to remove small noise
17 | kernel=cv.getStructuringElement(cv.MORPH_RECT,(3,3))
18 |     open_binary=cv.morphologyEx(binary,cv.MORPH_OPEN,kernel,iterations=2)  # two iterations of morphological opening
19 | # cv.imshow('1-open-op',open_binary)
20 |     dilate_bg=cv.dilate(open_binary,kernel,iterations=3)  # three dilations to form the sure background
21 | # cv.imshow('2-dilate-op',dilate_bg)
22 |     # 3. distance transform
23 |     # DIST_L1 is Manhattan distance, DIST_L2 is Euclidean distance; maskSize works like a convolution mask size
24 | dist=cv.distanceTransform(open_binary,cv.DIST_L2,3) #??
25 |     dist_norm=cv.normalize(dist,None,0,1.0,cv.NORM_MINMAX)  # rescale to the 0-1 range
26 | # cv.imshow('3-distance-t',dist_norm*50)
27 |
28 | ret,surface=cv.threshold(dist,dist.max()*0.65,255,cv.THRESH_BINARY)
29 | # cv.imshow('4-surface',surface)
30 |
31 |     # 4. compute the markers
32 |     surface_fg=np.uint8(surface)  # sure foreground
33 |     unknown=cv.subtract(dilate_bg,surface_fg)  # unknown region = sure background - sure foreground
34 |     # cv.imshow('5-unknown',unknown)
35 |     ret,markers=cv.connectedComponents(surface_fg)  # label the sure-foreground blobs to build the markers
36 | print(ret)
37 | # cv.imshow('6-markers',markers)
38 |
39 |     # 5. watershed transform
40 |     markers=markers+1  # shift labels so the sure background becomes 1 instead of 0
41 |     markers[unknown==255]=0  # the unknown region gets label 0
42 |     markers=cv.watershed(image,markers)  # watershed boundary pixels are set to -1
43 |     image[markers==-1]=[0,0,255]  # paint the boundaries red
44 | cv.imshow('7-result',image)
45 |
46 | src = cv.imread("./raw_data/4.jpg")
47 | # cv.imshow("gray_img", src)
48 | cv.namedWindow("result", cv.WINDOW_NORMAL)
49 | water_shed(src)
50 | cv.waitKey(0)
51 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
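Note on 分水岭.py: the function only visualizes the watershed boundaries; since the repository is about counting objects, a short follow-up sketch (ours, not part of the repo) shows how the same marker pipeline yields a count. Label 0 from cv.connectedComponents is the background, so the estimate is the number of labels minus one:

import cv2 as cv
import numpy as np

img = cv.imread('./raw_data/4.jpg')            # same input the script loads
blurred = cv.bilateralFilter(img, 0, 50, 5)
gray = cv.cvtColor(blurred, cv.COLOR_BGR2GRAY)
_, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)

kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
opened = cv.morphologyEx(binary, cv.MORPH_OPEN, kernel, iterations=2)

dist = cv.distanceTransform(opened, cv.DIST_L2, 3)
_, sure_fg = cv.threshold(dist, 0.65 * dist.max(), 255, cv.THRESH_BINARY)
sure_fg = np.uint8(sure_fg)

# each connected sure-foreground blob becomes one seed region
n_labels, markers = cv.connectedComponents(sure_fg)
print('estimated object count:', n_labels - 1)

The 0.65 * dist.max() threshold is the same one the script uses; raising it splits touching objects more aggressively but may drop small ones entirely.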
/problems.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LelandYan/Image_processing/cec513730432b36ff434f77364b2338a253e98c1/problems.docx
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2019.9.11
2 | cloudpickle==1.2.2
3 | cycler==0.10.0
4 | cytoolz==0.10.0
5 | dask==2.6.0
6 | decorator==4.4.0
7 | imageio==2.6.0
8 | kiwisolver==1.1.0
9 | matplotlib==3.1.1
10 | mkl-fft==1.0.14
11 | mkl-random==1.1.0
12 | mkl-service==2.3.0
13 | networkx==2.3
14 | numpy==1.17.3
15 | olefile==0.46
16 | opencv-contrib-python==3.4.2.16
17 | opencv-python==3.4.2.16
18 | Pillow==6.2.0
19 | pyparsing==2.4.2
20 | python-dateutil==2.8.0
21 | pytz==2019.3
22 | PyWavelets==1.0.3
23 | scikit-image==0.15.0
24 | scipy==1.3.1
25 | six==1.12.0
26 | toolz==0.10.0
27 | tornado==6.0.3
28 | wincertstore==0.2
29 |
--------------------------------------------------------------------------------