├── .gitignore
├── m.js
├── porn
│   ├── porn
│   │   ├── __init__.py
│   │   ├── items.py
│   │   ├── middlewares.py
│   │   ├── pipelines.py
│   │   ├── settings.py
│   │   └── spiders
│   │       ├── PornSpider.py
│   │       └── __init__.py
│   ├── scrapy.cfg
│   └── setup.py
├── readme.md
└── src
    └── 1.jpg
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | __pycache__
3 | log.txt
4 | porn/porn/units.py
--------------------------------------------------------------------------------
/m.js:
--------------------------------------------------------------------------------
1 | bxqrm = '__0x99c1f'
2 | , __0x99c1f = ['PsOMcMOTVQ==', 'wpHDoSE3fA==', 'GsO6wpDDsMOZS8O8JMKmw6hcEcOF', 'ecOYw4TCvDY=', 'wotowqbDi3I=', 'BcKewocQwqjCkw==', 'w4zCqELDj8O8', 'wpzDgCPDgsO1MFrCmcO5Ly3CrA==', 'AyoSw450JcK4dQ3Cnw==', 'WndFTcOR', 'w5bCtFxgwqE=', 'VsKfY8KMQg==', 'DsKgw4VRaiw=', 'b29sVcO+', 'w4jCpAk=', 'w5xEwpgaHQ==', 'f39tUMOt', 'wrzDtxoTfjLDsFDDpMKOw5PCncKTNQ==', 'LsKewrg6wr8=', '5YmI6Zim54mJ5pyU5Y6077yye0Lkv6zlr6zmnL/lvpnnqaM=', 'XcKEJsO7w4w=', 'woPCix19w5/CisK9w6TDgkVOEcO0', 'LsKkw7XDgFA=', 'worDhcOswownVg==', 'aWfCpjPCjQ==', 'wrMcc8KoV8KQ', 'ARABw4R+', 'OcKWw6HDo1w=', 'Y3xJSMOo', 'L1zCojrCrQ==', 'JsOiw7/CrDfCgQEdwrnClMKYZQ==', 'CsKTwogFwp/ClGnCmcKrw4M=', 'JQ9q', 'NcO+w7TCpBLCgA4Kwp4=', '54ue5pyr5Y+v77ypw4LDteS8r+Wvg+afgeW9muepne+9t+i/m+iso+aXueaNpuaIguS6meeauOW1t+S9rg==', 'M0oq', '5YiL6Zui54us5p6g5Yyc77y7wqAr5L6J5ayO5p2z5b2U56mh', 'woHDpcO2wrA/', 'w5Biw74YwpM=', 'BzVx', 'S21TR8OQ', 'dHdnRcON', 'w5zCrEbDpcObwpHChcOHw4DCgHR7dgY=', 'w5XCh17DqMOS'];
3 | (function(_0x20326f, _0x20880f) {
4 | var _0x564cb8 = function(_0x4e7d5f) {
5 | while (--_0x4e7d5f) {
6 | _0x20326f['push'](_0x20326f['shift']());
7 | }
8 | };
9 | _0x564cb8(++_0x20880f);
10 | }(__0x99c1f, 0x1a1));
11 | var func1 = function(_0x231fd0, _0x4f680a) {
12 | _0x231fd0 = _0x231fd0 - 0x0;
13 | var _0x5b4826 = __0x99c1f[_0x231fd0];
14 | if (func1['initialized'] === undefined) {
15 | (function() {
16 | var _0x550fbc = typeof window !== 'undefined' ? window : typeof process === 'object' && typeof require === 'function' && typeof global === 'object' ? global : this;
17 | var _0x18d5c9 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=';
18 | _0x550fbc['atob'] || (_0x550fbc['atob'] = function(_0x4ce2f1) {
19 | var _0x333808 = String(_0x4ce2f1)['replace'](/=+$/, '');
20 | for (var _0x432180 = 0x0, _0x2ab90b, _0x991246, _0x981158 = 0x0, _0x57b080 = ''; _0x991246 = _0x333808['charAt'](_0x981158++); ~_0x991246 && (_0x2ab90b = _0x432180 % 0x4 ? _0x2ab90b * 0x40 + _0x991246 : _0x991246,
21 | _0x432180++ % 0x4) ? _0x57b080 += String['fromCharCode'](0xff & _0x2ab90b >> (-0x2 * _0x432180 & 0x6)) : 0x0) {
22 | _0x991246 = _0x18d5c9['indexOf'](_0x991246);
23 | }
24 | return _0x57b080;
25 | }
26 | );
27 | }());
28 | var _0x219af0 = function(_0x441e3a, _0x2cc193) {
29 | var _0x5f41ea = [], _0x503809 = 0x0, _0xe42b77, _0x56465b = '', _0x52cace = '';
30 | _0x441e3a = atob(_0x441e3a);
31 | for (var _0x39753a = 0x0, _0xf81284 = _0x441e3a['length']; _0x39753a < _0xf81284; _0x39753a++) {
32 | _0x52cace += '%' + ('00' + _0x441e3a['charCodeAt'](_0x39753a)['toString'](0x10))['slice'](-0x2);
33 | }
34 | _0x441e3a = decodeURIComponent(_0x52cace);
35 | for (var _0x307b3e = 0x0; _0x307b3e < 0x100; _0x307b3e++) {
36 | _0x5f41ea[_0x307b3e] = _0x307b3e;
37 | }
38 | for (_0x307b3e = 0x0; _0x307b3e < 0x100; _0x307b3e++) {
39 | _0x503809 = (_0x503809 + _0x5f41ea[_0x307b3e] + _0x2cc193['charCodeAt'](_0x307b3e % _0x2cc193['length'])) % 0x100;
40 | _0xe42b77 = _0x5f41ea[_0x307b3e];
41 | _0x5f41ea[_0x307b3e] = _0x5f41ea[_0x503809];
42 | _0x5f41ea[_0x503809] = _0xe42b77;
43 | }
44 | _0x307b3e = 0x0;
45 | _0x503809 = 0x0;
46 | for (var _0x3ab53f = 0x0; _0x3ab53f < _0x441e3a['length']; _0x3ab53f++) {
47 | _0x307b3e = (_0x307b3e + 0x1) % 0x100;
48 | _0x503809 = (_0x503809 + _0x5f41ea[_0x307b3e]) % 0x100;
49 | _0xe42b77 = _0x5f41ea[_0x307b3e];
50 | _0x5f41ea[_0x307b3e] = _0x5f41ea[_0x503809];
51 | _0x5f41ea[_0x503809] = _0xe42b77;
52 | _0x56465b += String['fromCharCode'](_0x441e3a['charCodeAt'](_0x3ab53f) ^ _0x5f41ea[(_0x5f41ea[_0x307b3e] + _0x5f41ea[_0x503809]) % 0x100]);
53 | }
54 | return _0x56465b;
55 | };
56 | func1['rc4'] = _0x219af0;
57 | func1['data'] = {};
58 | func1['initialized'] = !![];
59 | }
60 | var _0xfeb75b = func1['data'][_0x231fd0];
61 | if (_0xfeb75b === undefined) {
62 | if (func1['once'] === undefined) {
63 | func1['once'] = !![];
64 | }
65 | _0x5b4826 = func1['rc4'](_0x5b4826, _0x4f680a);
66 | func1['data'][_0x231fd0] = _0x5b4826;
67 | } else {
68 | _0x5b4826 = _0xfeb75b;
69 | }
70 | return _0x5b4826;
71 | };
72 | function strencode(str1, str2, str3) {
73 | var a = {
74 | 'rUJzL': func1('0x0', 'l6Io'),
75 | 'aRrxI': function _0x49676a(_0x1630be, _0x13bc8a) {
76 | return _0x1630be(_0x13bc8a);
77 | },
78 | 'dBxJx': function _0x5cfff4(_0x464ec4, _0x475764) {
79 | return _0x464ec4 == _0x475764;
80 | },
81 | 'zfcNo': function _0x1aca76(_0x4f2cfe, _0x2e2fc3) {
82 | return _0x4f2cfe < _0x2e2fc3;
83 | },
84 | 'NqIoV': function _0xc1f9d6(_0x375348, _0x1d4824) {
85 | return _0x375348 % _0x1d4824;
86 | }
87 | };
88 | var _0x5913a9 = a['rUJzL'][func1('0x1', '(CgI')]('|')
89 | , _0x9727ce = 0x0;
90 | while (!![]) {
91 | switch (_0x5913a9[_0x9727ce++]) {
92 | case '0':
93 | l = str3[func1('0x2', '1K^x')](-0x1);
94 | continue;
95 | case '1':
96 | return a[func1('0x3', 'gRb5')](atob, code);
97 | case '2':
98 | len = str2[func1('0x4', 'S8ez')];
99 | continue;
100 | case '3':
101 | str1 = a[func1('0x5', 'ymN[')](atob, str1);
102 | continue;
103 | case '4':
104 | if (a[func1('0x6', '(CgI')](l, 0x2)) {
105 | t = str1;
106 | str1 = str2;
107 | str2 = t;
108 | }
109 | continue;
110 | case '5':
111 | for (i = 0x0; a[func1('0x7', 'J1vC')](i, str1['length']); i++) {
112 | k = a[func1('0x8', 'N3$4')](i, len);
113 | code += String[func1('0x9', '9mT#')](str1['charCodeAt'](i) ^ str2[func1('0xa', 'JIFn')](k));
114 | }
115 | continue;
116 | case '6':
117 | code = '';
118 | continue;
119 | }
120 | break;
121 | }
122 | }
--------------------------------------------------------------------------------
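`m.js` is obfuscated player code lifted from the target site; the spider only ever calls its `strencode` function through execjs. For readers who do not want to trace the flattened switch, here is a hedged pure-Python reading of what it appears to do. The step order is inferred from the data flow (the real order is hidden in the encoded `rUJzL` string), so treat this as a sketch for understanding, not a drop-in replacement for the execjs call:

```python
import base64

def strencode_sketch(str1: str, str2: str, str3: str) -> str:
    """Approximate Python reading of strencode() in m.js (inferred, unverified)."""
    # The 'l == 2' branch: the last character of str3 appears to decide
    # whether the payload and key arguments are swapped.
    if str3[-1] == '2':
        str1, str2 = str2, str1
    data = base64.b64decode(str1)                 # atob(str1)
    key = str2.encode('latin-1')                  # charCodeAt() on the raw key string
    xored = bytes(b ^ key[i % len(key)] for i, b in enumerate(data))
    return base64.b64decode(xored).decode('utf-8')  # the final atob(code)
```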
/porn/porn/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xinghe98/91porn/d369900544b78d980f429590f4a5c52423443e98/porn/porn/__init__.py
--------------------------------------------------------------------------------
/porn/porn/items.py:
--------------------------------------------------------------------------------
1 | # Define here the models for your scraped items
2 | #
3 | # See documentation in:
4 | # https://docs.scrapy.org/en/latest/topics/items.html
5 |
6 | import scrapy
7 |
8 |
9 | class PornItem(scrapy.Item):
10 | # define the fields for your item here like:
11 | # name = scrapy.Field()
12 | url = scrapy.Field()
13 | title = scrapy.Field()
14 | add_time = scrapy.Field()
15 | duration = scrapy.Field()
16 | author = scrapy.Field()
17 | views = scrapy.Field()
18 | message = scrapy.Field()
19 | collect = scrapy.Field()
20 | like = scrapy.Field()
21 | dislike = scrapy.Field()
22 | video_url = scrapy.Field()
23 |
24 |
25 |
--------------------------------------------------------------------------------
/porn/porn/middlewares.py:
--------------------------------------------------------------------------------
1 | import random
2 | from porn.units import proxy_url
3 | import base64
4 | import aiohttp
5 | import json
6 |
7 | class RandomUserAgentMiddleware(object):
8 | def __init__(self):
9 | self.user_agent = [
10 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36",
11 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36"
12 | ]
13 |
14 | def process_request(self, request, spider):
15 | request.headers['User-Agent'] = random.choice(self.user_agent)
16 |
17 |
18 | class ProxyMiddleware(object):
19 | proxy_url = proxy_url
20 |
21 |
22 |
23 | async def process_request(self, request, spider):
24 | request.meta['max_retry_times'] = 10
25 | async with aiohttp.ClientSession() as client:
26 | resp = await client.get(proxy_url)
27 | if not resp.status == 200:
28 | return
29 | res = json.loads(await resp.text())
30 | ip = res['data'][0]['ip']
31 | port = res['data'][0]['port']
32 | proxy = 'https://' + str(ip) + ':' + str(port)
33 | request.meta['proxy'] = proxy
34 | auth = base64.b64encode(b'7894ab:bec5cc3')  # username:password from your proxy provider; delete this line and the next if you have none
35 | request.headers['Proxy-Authorization'] = b'Basic ' + auth
36 |
37 |
38 |
39 |
40 | class Randomip(object):
41 | def genip(self):
42 | m=random.randint(0,255)
43 | n=random.randint(0,255)
44 | x=random.randint(0,255)
45 | y=random.randint(0,255)
46 | randomIP=str(m)+'.'+str(n)+'.'+str(x)+'.'+str(y)
47 | return randomIP
48 |
49 | def process_request(self, request, spider):
50 | request.headers['X-Forwarded-For'] = self.genip()
51 |
52 |
--------------------------------------------------------------------------------
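For context, `ProxyMiddleware.process_request` reads `res['data'][0]['ip']` and `res['data'][0]['port']`, i.e. it assumes the endpoint configured as `proxy_url` in `units.py` answers with JSON roughly shaped like this (the keys mirror the lookups in the code; the values below are made up):

```python
# Illustrative response body the middleware expects from proxy_url.
example_response = {
    "data": [
        {"ip": "203.0.113.10", "port": 3128},
    ]
}
```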
/porn/porn/pipelines.py:
--------------------------------------------------------------------------------
1 | # Define your item pipelines here
2 | #
3 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
4 | # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
5 |
6 |
7 | # useful for handling different item types with a single interface
8 | import pymongo
9 | from itemadapter import ItemAdapter
10 | from porn.units import MONGO_URI
11 |
12 |
13 | class PornPipeline:
14 | collection_name = 'pornb'
15 |
16 | def __init__(self, mongo_db):
17 | self.mongo_uri = MONGO_URI
18 | self.mongo_db = mongo_db
19 |
20 | @classmethod
21 | def from_crawler(cls, crawler):
22 | return cls(
23 | mongo_db=crawler.settings.get('MONGO_DATABASE')
24 | )
25 |
26 | def open_spider(self, spider):
27 | self.client = pymongo.MongoClient(self.mongo_uri)
28 | self.db = self.client[self.mongo_db]
29 |
30 | def close_spider(self, spider):
31 | self.client.close()
32 |
33 | def process_item(self, item, spider):
34 | self.db[self.collection_name].update_one({'video_url': item['video_url']}, {'$set': dict(item)}, upsert=True)
35 | return item
36 |
--------------------------------------------------------------------------------
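Because `process_item` upserts on `video_url`, re-running the crawl refreshes existing documents instead of inserting duplicates. A quick way to inspect what landed in MongoDB, using the database name `91` from `settings.py` and the collection name `pornb` from above (adjust the URI to whatever you put in `units.py`):

```python
import pymongo

client = pymongo.MongoClient('mongodb://127.0.0.1:27017/')  # your MONGO_URI
for doc in client['91']['pornb'].find().limit(5):
    print(doc.get('title'), doc.get('video_url'))
```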
/porn/porn/settings.py:
--------------------------------------------------------------------------------
1 | # Scrapy settings for porn project
2 | #
3 | # For simplicity, this file contains only settings considered important or
4 | # commonly used. You can find more settings consulting the documentation:
5 | #
6 | # https://docs.scrapy.org/en/latest/topics/settings.html
7 | # https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
8 | # https://docs.scrapy.org/en/latest/topics/spider-middleware.html
9 |
10 | #LOG_FILE="log.txt"
11 | LOG_LEVEL = 'DEBUG'
12 |
13 | BOT_NAME = 'porn'
14 |
15 | SPIDER_MODULES = ['porn.spiders']
16 | NEWSPIDER_MODULE = 'porn.spiders'
17 | MONGO_DATABASE = '91'
18 |
19 |
20 | # Crawl responsibly by identifying yourself (and your website) on the user-agent
21 | # USER_AGENT = 'porn (+http://www.yourdomain.com)'
22 |
23 | # Obey robots.txt rules
24 | # ROBOTSTXT_OBEY = True
25 |
26 | # Configure maximum concurrent requests performed by Scrapy (default: 16)
27 | # CONCURRENT_REQUESTS = 32
28 |
29 | # Configure a delay for requests for the same website (default: 0)
30 | # See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
31 | # See also autothrottle settings and docs
32 | # DOWNLOAD_DELAY = 3
33 | # The download delay setting will honor only one of:
34 | # CONCURRENT_REQUESTS_PER_DOMAIN = 16
35 | # CONCURRENT_REQUESTS_PER_IP = 16
36 |
37 | # Disable cookies (enabled by default)
38 | # COOKIES_ENABLED = False
39 |
40 | # Disable Telnet Console (enabled by default)
41 | # TELNETCONSOLE_ENABLED = False
42 |
43 | # Override the default request headers:
44 | DEFAULT_REQUEST_HEADERS = {
45 | 'Host': '91porn.com',
46 | 'accept-language': 'zh-CN,zh;q=0.9'
47 | }
48 |
49 | # Enable or disable spider middlewares
50 | # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
51 | # SPIDER_MIDDLEWARES = {
52 | # 'porn.middlewares.RandomUserAgentMiddleware': 1000
53 | # }
54 |
55 | # Enable or disable downloader middlewares
56 | # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
57 | DOWNLOADER_MIDDLEWARES = {
58 | 'porn.middlewares.RandomUserAgentMiddleware': 543,
59 | 'porn.middlewares.ProxyMiddleware' : 544,
60 | 'porn.middlewares.Randomip': 545
61 | }
62 |
63 | # Enable or disable extensions
64 | # See https://docs.scrapy.org/en/latest/topics/extensions.html
65 | # EXTENSIONS = {
66 | # 'scrapy.extensions.telnet.TelnetConsole': None,
67 | # }
68 |
69 | # Configure item pipelines
70 | # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
71 | ITEM_PIPELINES = {
72 | 'porn.pipelines.PornPipeline': 300,
73 | }
74 |
75 | # Enable and configure the AutoThrottle extension (disabled by default)
76 | # See https://docs.scrapy.org/en/latest/topics/autothrottle.html
77 | # AUTOTHROTTLE_ENABLED = True
78 | # The initial download delay
79 | # AUTOTHROTTLE_START_DELAY = 5
80 | # The maximum download delay to be set in case of high latencies
81 | # AUTOTHROTTLE_MAX_DELAY = 60
82 | # The average number of requests Scrapy should be sending in parallel to
83 | # each remote server
84 | # AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
85 | # Enable showing throttling stats for every response received:
86 | # AUTOTHROTTLE_DEBUG = False
87 |
88 | # Enable and configure HTTP caching (disabled by default)
89 | # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
90 | # HTTPCACHE_ENABLED = True
91 | # HTTPCACHE_EXPIRATION_SECS = 0
92 | # HTTPCACHE_DIR = 'httpcache'
93 | # HTTPCACHE_IGNORE_HTTP_CODES = []
94 | # HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
95 | RETRY_ENABLED = True
96 | RETRY_TIMES = 10  # number of retries
97 | CONCURRENT_REQUESTS = 16  # concurrency
98 | DOWNLOAD_TIMEOUT = 180  # timeout in seconds
99 | RETRY_HTTP_CODES = [500, 503, 504, 400, 403, 404, 408]
100 | TWISTED_REACTOR = 'twisted.internet.asyncioreactor.AsyncioSelectorReactor'
101 |
--------------------------------------------------------------------------------
/porn/porn/spiders/PornSpider.py:
--------------------------------------------------------------------------------
1 | import scrapy
2 | import execjs
3 | import re
4 | from porn.items import PornItem
5 | from urllib.parse import unquote
6 |
7 | with open('/root/m.js', 'r', encoding='UTF-8') as f:  # adjust this path to wherever you keep m.js
8 | js_code = f.read()
9 | context = execjs.compile(js_code)
10 |
11 | class PornspiderSpider(scrapy.Spider):
12 | name = 'PornSpider'
13 | allowed_domains = ['91porn.com']
14 | start_urls = ['https://91porn.com/v.php']
15 |
16 | def parse(self, response):
17 | data = response.xpath('//div[@class="row"]//div[@class="row"]/div//a/@href').getall()
18 | for row in data:
19 | yield scrapy.Request(url=row, callback=self.parse_info)
20 |
21 | for i in range(2,3936):
22 | url = self.start_urls[0] + '?&page={}'.format(str(i))
23 | yield scrapy.Request(url=url, callback=self.parse)
24 |
25 | def parse_info(self, response):
26 | items = PornItem()
27 | items['url'] = response.request.url
28 | items['title'] = response.xpath('//div[@id="videodetails"][1]/h4/text()').get().strip()
29 | items['add_time'] = response.xpath('//span[@class="title-yakov"]/text()').get()
30 | video_info = response.xpath('//span[@class="video-info-span"]/text()').getall()
31 | items['duration'] = video_info[0]
32 | items['views'] = video_info[1]
33 | items['message'] = video_info[2]
34 | items['collect'] = video_info[3]
35 | vote = response.xpath('//div[@class="counter"]/text()').getall()
36 | items['like'] = vote[0]
37 | items['dislike'] = vote[1]
38 | items['author'] = []
39 | info = response.xpath('//div[@id="videodetails-content"]/span').get()
40 | author = response.xpath('//span[@class="title"]/text()').get()
41 | fans = re.findall(r'粉丝:(\d+)', info)[0]
42 | register = re.findall(r'注册:(.*前)', info)[0]
43 | videos = re.findall(r'(\d+)', info)[0]
44 | items['author'].append({
45 | 'author': author,
46 | 'fans': fans,
47 | 'register_time': register,
48 | 'upload_video': videos
49 | })
50 | try:
51 | encode = response.xpath('//div[@class="video-container"]').re(r'document.write\(strencode\((.*?)\)\)')[0].split(',')
52 | print(encode)
53 | str1 = encode[0].strip('"')
54 | str2 = encode[1].strip('"')
55 | str3 = encode[2].strip('"')
56 | result = context.call("strencode", str1,str2,str3)
57 | items['video_url'] = re.findall(r"src='(https://.*?)'", result)[0]
58 | except IndexError:
59 | encode = response.xpath('//div[@class="video-container"]').re(r'document.write\(strencode2\((.*?)\)\)')[0]
60 | items['video_url'] = re.findall(r"src='(https://.*?)'", unquote(encode))[0]
61 |
62 | # print(items)
63 | yield items
64 |
--------------------------------------------------------------------------------
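The spider passes the three quoted strings captured from `document.write(strencode(...))` straight to the JS via execjs. A minimal standalone check of that round trip, with placeholder arguments (real values come from a video page's HTML):

```python
import execjs

with open('m.js', 'r', encoding='UTF-8') as f:
    context = execjs.compile(f.read())

# Placeholder arguments -- substitute the three quoted strings that the
# spider's regex captures from an actual video page.
result = context.call('strencode', '<base64-payload>', '<key>', '<flag>')
print(result)  # should contain src='https://...' with the real video URL
```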
/porn/porn/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/porn/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.io/en/latest/deploy.html
5 |
6 | [settings]
7 | default = porn.settings
8 |
9 | # [deploy]
10 | # url = http://0.0.0.0:6800/
11 | # project = porn
12 |
--------------------------------------------------------------------------------
/porn/setup.py:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapyd-deploy
2 |
3 | from setuptools import setup, find_packages
4 |
5 | setup(
6 | name = 'project',
7 | version = '1.0',
8 | packages = find_packages(),
9 | entry_points = {'scrapy': ['settings = porn.settings']},
10 | )
11 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | # 2022-06-05
2 | This repository has been rewritten with Scrapy.
3 | 
4 | 
5 | # Required Python packages:
6 | + pip install scrapy
7 | + pip install pymongo
8 | + pip install aiohttp
9 | # Usage:
10 | 1. Go into the `porn` folder and, at the same level as `settings.py`, create a `units.py` file from this template:
11 | ```python
12 | 
13 | proxy_url = 'the API endpoint supplied by your proxy IP provider'
14 | MONGO_URI = 'mongodb://127.0.0.1:27017/'  # database address
15 | 
16 | ```
17 | 2. Create an `m.js` file yourself and update the JS path inside `PornSpider.py`; the required contents are in this repository's `m.js`.
18 | 
19 | 3. Run the project:
20 | ```shell
21 | scrapy crawl PornSpider
22 | ```
23 | 
24 | # Notes:
25 | + The site now serves videos as m3u8 files, which can still be converted into regular video files; see the third-party package `m3u8_to_MP4` (an ffmpeg-based sketch follows this file).
26 | + This script is for learning and exchange purposes only.
27 | + If you have any other or better suggestions, please let me know.
28 | + The site's rules may change at any time and this project may not be updated promptly; please investigate on your own.
29 | 
30 | # About the author:
31 | Email: 1176103825@qq.com
32 | 
33 | 
34 | # If you'd like, you can buy me a cappuccino:
35 | 
36 | 
--------------------------------------------------------------------------------
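On the m3u8 note in the readme: besides the `m3u8_to_MP4` package it mentions, a plain ffmpeg invocation performs the same conversion. A minimal sketch, assuming ffmpeg is installed and on PATH; the URL is a placeholder for a `video_url` stored by the pipeline:

```python
import subprocess

# Placeholder URL -- substitute a video_url scraped by the spider.
m3u8_url = 'https://example.com/playlist.m3u8'

# Remux the HLS segments into an MP4 container without re-encoding.
subprocess.run(['ffmpeg', '-i', m3u8_url, '-c', 'copy', 'output.mp4'], check=True)
```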
/src/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xinghe98/91porn/d369900544b78d980f429590f4a5c52423443e98/src/1.jpg
--------------------------------------------------------------------------------