├── Parts
│   ├── scanner_0.py
│   ├── scanner_1.py
│   ├── scanner_2.py
│   ├── scanner_3.py
│   ├── scanner_4.py
│   ├── scanner_5.py
│   └── scanner_6.py
├── README.md
└── scanner.py

--------------------------------------------------------------------------------
/Parts/scanner_0.py:
--------------------------------------------------------------------------------
import cv2

cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)

WIDTH, HEIGHT = 800, 600
cap.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)

while True:

    _, frame = cap.read()
    frame = cv2.rotate(frame, cv2.ROTATE_180)
    frame_copy = frame.copy()

    cv2.imshow("input", frame)

    if cv2.waitKey(1) & 0xFF == 27:
        break

cv2.destroyAllWindows()

--------------------------------------------------------------------------------
/Parts/scanner_1.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)

WIDTH, HEIGHT = 800, 600
cap.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)


def scan_detection(image):
    document_contour = np.array([[0, 0], [WIDTH, 0], [WIDTH, HEIGHT], [0, HEIGHT]])

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, threshold = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    contours, _ = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)

    max_area = 0
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > 1000:
            peri = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.015 * peri, True)
            if area > max_area and len(approx) == 4:
                document_contour = approx
                max_area = area

    cv2.drawContours(frame, [document_contour], -1, (0, 255, 0), 3)


while True:

    _, frame = cap.read()
    frame = cv2.rotate(frame, cv2.ROTATE_180)
    frame_copy = frame.copy()

    scan_detection(frame_copy)

    cv2.imshow("input", frame)

    if cv2.waitKey(1) & 0xFF == 27:
        break

cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/Parts/scanner_2.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
from imutils.perspective import four_point_transform

cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)

WIDTH, HEIGHT = 800, 600
cap.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)


def scan_detection(image):
    global document_contour

    document_contour = np.array([[0, 0], [WIDTH, 0], [WIDTH, HEIGHT], [0, HEIGHT]])

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, threshold = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    contours, _ = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)

    max_area = 0
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > 1000:
            peri = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.015 * peri, True)
            if area > max_area and len(approx) == 4:
                document_contour = approx
                max_area = area

    cv2.drawContours(frame, [document_contour], -1, (0, 255, 0), 3)


while True:

    _, frame = cap.read()
    frame = cv2.rotate(frame, cv2.ROTATE_180)
    frame_copy = frame.copy()

    scan_detection(frame_copy)

    cv2.imshow("input", frame)

    warped = four_point_transform(frame_copy, document_contour.reshape(4, 2))
    cv2.imshow("Warped", warped)

    if cv2.waitKey(1) & 0xFF == 27:
        break

cv2.destroyAllWindows()

--------------------------------------------------------------------------------
/Parts/scanner_3.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
from imutils.perspective import four_point_transform
import pytesseract

cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)

pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'

WIDTH, HEIGHT = 800, 600
cap.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)


def scan_detection(image):
    global document_contour

    document_contour = np.array([[0, 0], [WIDTH, 0], [WIDTH, HEIGHT], [0, HEIGHT]])

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, threshold = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    contours, _ = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)

    max_area = 0
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > 1000:
            peri = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.015 * peri, True)
            if area > max_area and len(approx) == 4:
                document_contour = approx
                max_area = area

    cv2.drawContours(frame, [document_contour], -1, (0, 255, 0), 3)


while True:

    _, frame = cap.read()
    frame = cv2.rotate(frame, cv2.ROTATE_180)
    frame_copy = frame.copy()

    scan_detection(frame_copy)

    cv2.imshow("input", frame)

    warped = four_point_transform(frame_copy, document_contour.reshape(4, 2))
    cv2.imshow("Warped", warped)

    ocr_text = pytesseract.image_to_string(warped)
    # print(ocr_text)

    if cv2.waitKey(1) & 0xFF == 27:
        break

cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/Parts/scanner_4.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
from imutils.perspective import four_point_transform
import pytesseract

cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)

pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'

WIDTH, HEIGHT = 800, 600
cap.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)


def image_processing(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, threshold = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)

    return threshold


def scan_detection(image):
    global document_contour

    document_contour = np.array([[0, 0], [WIDTH, 0], [WIDTH, HEIGHT], [0, HEIGHT]])

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, threshold = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    contours, _ = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)

    max_area = 0
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > 1000:
            peri = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.015 * peri, True)
            if area > max_area and len(approx) == 4:
                document_contour = approx
                max_area = area

    cv2.drawContours(frame, [document_contour], -1, (0, 255, 0), 3)


while True:

    _, frame = cap.read()
    frame = cv2.rotate(frame, cv2.ROTATE_180)
    frame_copy = frame.copy()

    scan_detection(frame_copy)

    cv2.imshow("input", frame)

    warped = four_point_transform(frame_copy, document_contour.reshape(4, 2))
    cv2.imshow("Warped", warped)

    processed = image_processing(warped)
    processed = processed[10:processed.shape[0] - 10, 10:processed.shape[1] - 10]
    cv2.imshow("Processed", processed)

    ocr_text = pytesseract.image_to_string(warped)
    # print(ocr_text)

    if cv2.waitKey(1) & 0xFF == 27:
        break

cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/Parts/scanner_5.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
from imutils.perspective import four_point_transform
import pytesseract

cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)

pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'

count = 0

font = cv2.FONT_HERSHEY_SIMPLEX

WIDTH, HEIGHT = 800, 600
cap.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)


def image_processing(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, threshold = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)

    return threshold


def scan_detection(image):
    global document_contour

    document_contour = np.array([[0, 0], [WIDTH, 0], [WIDTH, HEIGHT], [0, HEIGHT]])

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, threshold = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    contours, _ = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)

    max_area = 0
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > 1000:
            peri = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.015 * peri, True)
            if area > max_area and len(approx) == 4:
                document_contour = approx
                max_area = area

    cv2.drawContours(frame, [document_contour], -1, (0, 255, 0), 3)


def center_text(image, text):
    text_size = cv2.getTextSize(text, font, 2, 5)[0]
    text_x = (image.shape[1] - text_size[0]) // 2
    text_y = (image.shape[0] + text_size[1]) // 2
    cv2.putText(image, text, (text_x, text_y), font, 2, (255, 0, 255), 5, cv2.LINE_AA)


while True:

    _, frame = cap.read()
    frame = cv2.rotate(frame, cv2.ROTATE_180)
    frame_copy = frame.copy()

    scan_detection(frame_copy)

    cv2.imshow("input", frame)

    warped = four_point_transform(frame_copy, document_contour.reshape(4, 2))
    cv2.imshow("Warped", warped)

    processed = image_processing(warped)
    processed = processed[10:processed.shape[0] - 10, 10:processed.shape[1] - 10]
    cv2.imshow("Processed", processed)

    pressed_key = cv2.waitKey(1) & 0xFF
    if pressed_key == 27:
        break

    elif pressed_key == ord('s'):
        cv2.imwrite("output/scanned_" + str(count) + ".jpg", processed)
        count += 1

        center_text(frame, "Scan Saved")
        cv2.imshow("input", frame)
        cv2.waitKey(500)

    elif pressed_key == ord('o'):
        file = open("output/recognized_" + str(count - 1) + ".txt", "w")
        ocr_text = pytesseract.image_to_string(warped)
        # print(ocr_text)
        file.write(ocr_text)
        file.close()

        center_text(frame, "Text Saved")
        cv2.imshow("input", frame)
        cv2.waitKey(500)

cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/Parts/scanner_6.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
from imutils.perspective import four_point_transform
import pytesseract

cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)

pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'

count = 0
scale = 0.5

font = cv2.FONT_HERSHEY_SIMPLEX

WIDTH, HEIGHT = 1920, 1080
cap.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)


def image_processing(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, threshold = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)

    return threshold


def scan_detection(image):
    global document_contour

    document_contour = np.array([[0, 0], [WIDTH, 0], [WIDTH, HEIGHT], [0, HEIGHT]])

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, threshold = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    contours, _ = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)

    max_area = 0
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > 1000:
            peri = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.015 * peri, True)
            if area > max_area and len(approx) == 4:
                document_contour = approx
                max_area = area

    cv2.drawContours(frame, [document_contour], -1, (0, 255, 0), 3)


def center_text(image, text):
    text_size = cv2.getTextSize(text, font, 2, 5)[0]
    text_x = (image.shape[1] - text_size[0]) // 2
    text_y = (image.shape[0] + text_size[1]) // 2
    cv2.putText(image, text, (text_x, text_y), font, 2, (255, 0, 255), 5, cv2.LINE_AA)


while True:

    _, frame = cap.read()
    frame = cv2.rotate(frame, cv2.ROTATE_180)
    frame_copy = frame.copy()

    scan_detection(frame_copy)

    cv2.imshow("input", cv2.resize(frame, (int(scale * WIDTH), int(scale * HEIGHT))))

    warped = four_point_transform(frame_copy, document_contour.reshape(4, 2))
    cv2.imshow("Warped", cv2.resize(warped, (int(scale * warped.shape[1]), int(scale * warped.shape[0]))))

    processed = image_processing(warped)
    processed = processed[10:processed.shape[0] - 10, 10:processed.shape[1] - 10]
    cv2.imshow("Processed", cv2.resize(processed, (int(scale * processed.shape[1]),
                                                   int(scale * processed.shape[0]))))

    pressed_key = cv2.waitKey(1) & 0xFF
    if pressed_key == 27:
        break

    elif pressed_key == ord('s'):
        cv2.imwrite("output/scanned_" + str(count) + ".jpg", processed)
        count += 1

        center_text(frame, "Scan Saved")
        cv2.imshow("input", cv2.resize(frame, (int(scale * WIDTH), int(scale * HEIGHT))))
        cv2.waitKey(500)

    elif pressed_key == ord('o'):
        file = open("output/recognized_" + str(count - 1) + ".txt", "w")
        ocr_text = pytesseract.image_to_string(warped)
        # print(ocr_text)
        file.write(ocr_text)
        file.close()

        center_text(frame, "Text Saved")
        cv2.imshow("input", cv2.resize(frame, (int(scale * WIDTH), int(scale * HEIGHT))))
        cv2.waitKey(500)

cv2.destroyAllWindows()

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Document_scanner
Document scanner using OpenCV with Python

![1](https://user-images.githubusercontent.com/72137556/171441247-92507880-b4f6-42ea-ae72-a1bee941e536.png)

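## Usage

`scanner.py` opens the default webcam through the DirectShow backend (`cv2.CAP_DSHOW`, Windows-specific), outlines the largest four-corner contour it finds in each frame, warps that region to a top-down view with `four_point_transform`, and binarizes the warp for saving. The scripts in `Parts/` build the same pipeline up step by step. The Tesseract path is hard-coded to `C:\Program Files\Tesseract-OCR\tesseract.exe`, so adjust `tesseract_cmd` if your install lives elsewhere, and create an `output/` folder next to the script before saving. The Python packages used are `opencv-python`, `numpy`, `imutils` and `pytesseract`.

- `s` saves the processed scan to `output/scanned_<count>.jpg`
- `o` runs OCR on the current warped view and writes the text to a matching `output/recognized_*.txt`
- `Esc` quits

The same detection and warp steps can also be run on a single image instead of the camera stream. Below is a minimal sketch under that assumption; the input name `document.jpg` and output name `warped.jpg` are placeholders, not files shipped with this repo:

```python
import cv2
import numpy as np
from imutils.perspective import four_point_transform

image = cv2.imread("document.jpg")  # placeholder path, replace with your own image
height, width = image.shape[:2]

# Fall back to the full image if no four-corner contour is found
document_contour = np.array([[0, 0], [width, 0], [width, height], [0, height]])

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
_, threshold = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

contours, _ = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

max_area = 0
for contour in contours:
    area = cv2.contourArea(contour)
    if area > 1000:
        peri = cv2.arcLength(contour, True)
        approx = cv2.approxPolyDP(contour, 0.015 * peri, True)
        if area > max_area and len(approx) == 4:
            document_contour = approx
            max_area = area

warped = four_point_transform(image, document_contour.reshape(4, 2))
cv2.imwrite("warped.jpg", warped)  # placeholder output name
```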

--------------------------------------------------------------------------------
/scanner.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
from imutils.perspective import four_point_transform
import pytesseract

# Open the default webcam with the DirectShow backend (Windows)
cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)

# Path to the Tesseract executable used by pytesseract
pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'

count = 0    # number of scans saved so far
scale = 0.5  # display windows are shown at half size

font = cv2.FONT_HERSHEY_SIMPLEX

WIDTH, HEIGHT = 1920, 1080
cap.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)


def image_processing(image):
    # Binarize the warped document for a clean, printable scan
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, threshold = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)

    return threshold


def scan_detection(image):
    # Find the largest four-corner contour and store it in document_contour;
    # the whole frame is used as a fallback when nothing better is found
    global document_contour

    document_contour = np.array([[0, 0], [WIDTH, 0], [WIDTH, HEIGHT], [0, HEIGHT]])

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, threshold = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    contours, _ = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)

    max_area = 0
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > 1000:
            peri = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.015 * peri, True)
            if area > max_area and len(approx) == 4:
                document_contour = approx
                max_area = area

    # Outline the detected document on the live view
    cv2.drawContours(frame, [document_contour], -1, (0, 255, 0), 3)


def center_text(image, text):
    # Draw a status message centered on the image
    text_size = cv2.getTextSize(text, font, 2, 5)[0]
    text_x = (image.shape[1] - text_size[0]) // 2
    text_y = (image.shape[0] + text_size[1]) // 2
    cv2.putText(image, text, (text_x, text_y), font, 2, (255, 0, 255), 5, cv2.LINE_AA)


while True:

    _, frame = cap.read()
    frame = cv2.rotate(frame, cv2.ROTATE_180)
    frame_copy = frame.copy()

    scan_detection(frame_copy)

    cv2.imshow("input", cv2.resize(frame, (int(scale * WIDTH), int(scale * HEIGHT))))

    # Perspective-correct the detected document to a top-down view
    warped = four_point_transform(frame_copy, document_contour.reshape(4, 2))
    cv2.imshow("Warped", cv2.resize(warped, (int(scale * warped.shape[1]), int(scale * warped.shape[0]))))

    # Binarize the warp and trim a 10-pixel border
    processed = image_processing(warped)
    processed = processed[10:processed.shape[0] - 10, 10:processed.shape[1] - 10]
    cv2.imshow("Processed", cv2.resize(processed, (int(scale * processed.shape[1]),
                                                   int(scale * processed.shape[0]))))

    pressed_key = cv2.waitKey(1) & 0xFF
    if pressed_key == 27:  # Esc quits
        break

    elif pressed_key == ord('s'):  # save the processed scan
        cv2.imwrite("output/scanned_" + str(count) + ".jpg", processed)
        count += 1

        center_text(frame, "Scan Saved")
        cv2.imshow("input", cv2.resize(frame, (int(scale * WIDTH), int(scale * HEIGHT))))
        cv2.waitKey(500)

    elif pressed_key == ord('o'):  # OCR the current warp and save the text
        file = open("output/recognized_" + str(count - 1) + ".txt", "w")
        ocr_text = pytesseract.image_to_string(warped)
        # print(ocr_text)
        file.write(ocr_text)
        file.close()

        center_text(frame, "Text Saved")
        cv2.imshow("input", cv2.resize(frame, (int(scale * WIDTH), int(scale * HEIGHT))))
        cv2.waitKey(500)

cv2.destroyAllWindows()

--------------------------------------------------------------------------------